//===- AArch64InstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "aarch64-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class AArch64InstructionSelector : public InstructionSelector {
public:
  AArch64InstructionSelector(const AArch64TargetMachine &TM,
                             const AArch64Subtarget &STI,
                             const AArch64RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
                          MachineRegisterInfo &MRI) const;
  bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectVectorASHR(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectVectorSHL(MachineInstr &I, MachineRegisterInfo &MRI) const;

  // Helper to generate an equivalent of scalar_to_vector into a new register,
  // returned via 'Dst'.
  MachineInstr *emitScalarToVector(unsigned EltSize,
                                   const TargetRegisterClass *DstRC,
                                   unsigned Scalar,
                                   MachineIRBuilder &MIRBuilder) const;

  /// Emit a lane insert into \p DstReg, or a new vector register if None is
  /// provided.
  ///
  /// The lane inserted into is defined by \p LaneIdx. The vector source
  /// register is given by \p SrcReg. The register containing the element is
  /// given by \p EltReg.
  MachineInstr *emitLaneInsert(Optional<unsigned> DstReg, unsigned SrcReg,
                               unsigned EltReg, unsigned LaneIdx,
                               const RegisterBank &RB,
                               MachineIRBuilder &MIRBuilder) const;
  bool selectInsertElt(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectBuildVector(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;

  void collectShuffleMaskIndices(MachineInstr &I, MachineRegisterInfo &MRI,
                                 SmallVectorImpl<Optional<int>> &Idxs) const;
  bool selectShuffleVector(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectExtractElt(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectConcatVectors(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectSplitVectorUnmerge(MachineInstr &I,
                                MachineRegisterInfo &MRI) const;
  bool selectIntrinsicWithSideEffects(MachineInstr &I,
                                      MachineRegisterInfo &MRI) const;
  bool selectIntrinsic(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectVectorICmp(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectIntrinsicTrunc(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectIntrinsicRound(MachineInstr &I, MachineRegisterInfo &MRI) const;
  unsigned emitConstantPoolEntry(Constant *CPVal, MachineFunction &MF) const;
  MachineInstr *emitLoadFromConstantPool(Constant *CPVal,
                                         MachineIRBuilder &MIRBuilder) const;

  // Emit a vector concat operation.
  MachineInstr *emitVectorConcat(Optional<unsigned> Dst, unsigned Op1,
                                 unsigned Op2,
                                 MachineIRBuilder &MIRBuilder) const;
  MachineInstr *emitExtractVectorElt(Optional<unsigned> DstReg,
                                     const RegisterBank &DstRB, LLT ScalarTy,
                                     unsigned VecReg, unsigned LaneIdx,
                                     MachineIRBuilder &MIRBuilder) const;

  ComplexRendererFns selectArithImmed(MachineOperand &Root) const;

  ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root,
                                            unsigned Size) const;

  ComplexRendererFns selectAddrModeUnscaled8(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 1);
  }
  ComplexRendererFns selectAddrModeUnscaled16(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 2);
  }
  ComplexRendererFns selectAddrModeUnscaled32(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 4);
  }
  ComplexRendererFns selectAddrModeUnscaled64(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 8);
  }
  ComplexRendererFns selectAddrModeUnscaled128(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 16);
  }

  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root,
                                           unsigned Size) const;
  template <int Width>
  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root) const {
    return selectAddrModeIndexed(Root, Width / 8);
  }

  void renderTruncImm(MachineInstrBuilder &MIB, const MachineInstr &MI) const;

  // Materialize a GlobalValue or BlockAddress using a movz+movk sequence.
  void materializeLargeCMVal(MachineInstr &I, const Value *V,
                             unsigned char OpFlags) const;

  // Optimization methods.

  // Helper function to check if a reg def is an MI with a given opcode and
  // returns it if so.
  MachineInstr *findMIFromReg(unsigned Reg, unsigned Opc,
                              MachineIRBuilder &MIB) const {
    auto *Def = MIB.getMRI()->getVRegDef(Reg);
    if (!Def || Def->getOpcode() != Opc)
      return nullptr;
    return Def;
  }

  bool tryOptVectorShuffle(MachineInstr &I) const;
  bool tryOptVectorDup(MachineInstr &MI) const;

  const AArch64TargetMachine &TM;
  const AArch64Subtarget &STI;
  const AArch64InstrInfo &TII;
  const AArch64RegisterInfo &TRI;
  const AArch64RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

// We declare the temporaries used by selectImpl() in the class to minimize the
// cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

AArch64InstructionSelector::AArch64InstructionSelector(
    const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
    const AArch64RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
static const TargetRegisterClass *
getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB,
                         const RegisterBankInfo &RBI,
                         bool GetAllRegSet = false) {
  if (RB.getID() == AArch64::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 32)
      return GetAllRegSet ? &AArch64::GPR32allRegClass
                          : &AArch64::GPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return GetAllRegSet ? &AArch64::GPR64allRegClass
                          : &AArch64::GPR64RegClass;
    return nullptr;
  }

  if (RB.getID() == AArch64::FPRRegBankID) {
    if (Ty.getSizeInBits() <= 16)
      return &AArch64::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &AArch64::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &AArch64::FPR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return &AArch64::FPR128RegClass;
    return nullptr;
  }

  return nullptr;
}

/// Given a register bank, and size in bits, return the smallest register class
/// that can represent that combination.
static const TargetRegisterClass *
getMinClassForRegBank(const RegisterBank &RB, unsigned SizeInBits,
                      bool GetAllRegSet = false) {
  unsigned RegBankID = RB.getID();

  if (RegBankID == AArch64::GPRRegBankID) {
    if (SizeInBits <= 32)
      return GetAllRegSet ? &AArch64::GPR32allRegClass
                          : &AArch64::GPR32RegClass;
    if (SizeInBits == 64)
      return GetAllRegSet ? &AArch64::GPR64allRegClass
                          : &AArch64::GPR64RegClass;
  }

  if (RegBankID == AArch64::FPRRegBankID) {
    switch (SizeInBits) {
    default:
      return nullptr;
    case 8:
      return &AArch64::FPR8RegClass;
    case 16:
      return &AArch64::FPR16RegClass;
    case 32:
      return &AArch64::FPR32RegClass;
    case 64:
      return &AArch64::FPR64RegClass;
    case 128:
      return &AArch64::FPR128RegClass;
    }
  }

  return nullptr;
}

/// Returns the correct subregister to use for a given register class.
static bool getSubRegForClass(const TargetRegisterClass *RC,
                              const TargetRegisterInfo &TRI, unsigned &SubReg) {
  switch (TRI.getRegSizeInBits(*RC)) {
  case 8:
    SubReg = AArch64::bsub;
    break;
  case 16:
    SubReg = AArch64::hsub;
    break;
  case 32:
    if (RC == &AArch64::GPR32RegClass)
      SubReg = AArch64::sub_32;
    else
      SubReg = AArch64::ssub;
    break;
  case 64:
    SubReg = AArch64::dsub;
    break;
  default:
    LLVM_DEBUG(
        dbgs() << "Couldn't find appropriate subregister for register class.");
    return false;
  }

  return true;
}

/// Check whether \p I is a currently unsupported binary operation:
/// - it has an unsized type
/// - an operand is not a vreg
/// - all operands are not in the same bank
/// These are checks that should someday live in the verifier, but right now,
/// these are mostly limitations of the aarch64 selector.
static bool unsupportedBinOp(const MachineInstr &I,
                             const AArch64RegisterBankInfo &RBI,
                             const MachineRegisterInfo &MRI,
                             const AArch64RegisterInfo &TRI) {
  LLT Ty = MRI.getType(I.getOperand(0).getReg());
  if (!Ty.isValid()) {
    LLVM_DEBUG(dbgs() << "Generic binop register should be typed\n");
    return true;
  }

  const RegisterBank *PrevOpBank = nullptr;
  for (auto &MO : I.operands()) {
    // FIXME: Support non-register operands.
    if (!MO.isReg()) {
      LLVM_DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
      return true;
    }

    // FIXME: Can generic operations have physical registers operands? If
    // so, this will need to be taught about that, and we'll need to get the
    // bank out of the minimal class for the register.
    // Either way, this needs to be documented (and possibly verified).
    if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      LLVM_DEBUG(dbgs() << "Generic inst has physical register operand\n");
      return true;
    }

    const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
    if (!OpBank) {
      LLVM_DEBUG(dbgs() << "Generic register has no bank or class\n");
      return true;
    }

    if (PrevOpBank && OpBank != PrevOpBank) {
      LLVM_DEBUG(dbgs() << "Generic inst operands have different banks\n");
      return true;
    }
    PrevOpBank = OpBank;
  }
  return false;
}

/// Select the AArch64 opcode for the basic binary operation \p GenericOpc
/// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID
/// and of size \p OpSize.
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
                               unsigned OpSize) {
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    if (OpSize == 32) {
      switch (GenericOpc) {
      case TargetOpcode::G_SHL:
        return AArch64::LSLVWr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVWr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVWr;
      default:
        return GenericOpc;
      }
    } else if (OpSize == 64) {
      switch (GenericOpc) {
      case TargetOpcode::G_GEP:
        return AArch64::ADDXrr;
      case TargetOpcode::G_SHL:
        return AArch64::LSLVXr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVXr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVXr;
      default:
        return GenericOpc;
      }
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDSrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBSrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULSrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVSrr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDDrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBDrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULDrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVDrr;
      case TargetOpcode::G_OR:
        return AArch64::ORRv8i8;
      default:
        return GenericOpc;
      }
    }
    break;
  }
  return GenericOpc;
}

/// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc,
/// appropriate for the (value) register bank \p RegBankID and of memory access
/// size \p OpSize. This returns the variant with the base+unsigned-immediate
/// addressing mode (e.g., LDRXui).
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
                                    unsigned OpSize) {
  const bool isStore = GenericOpc == TargetOpcode::G_STORE;
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBBui : AArch64::LDRBBui;
    case 16:
      return isStore ? AArch64::STRHHui : AArch64::LDRHHui;
    case 32:
      return isStore ? AArch64::STRWui : AArch64::LDRWui;
    case 64:
      return isStore ? AArch64::STRXui : AArch64::LDRXui;
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBui : AArch64::LDRBui;
    case 16:
      return isStore ? AArch64::STRHui : AArch64::LDRHui;
    case 32:
      return isStore ? AArch64::STRSui : AArch64::LDRSui;
    case 64:
      return isStore ? AArch64::STRDui : AArch64::LDRDui;
    }
    break;
  }
  return GenericOpc;
}

#ifndef NDEBUG
/// Helper function that verifies that we have a valid copy at the end of
/// selectCopy. Verifies that the source and dest have the expected sizes and
/// then returns true.
static bool isValidCopy(const MachineInstr &I, const RegisterBank &DstBank,
                        const MachineRegisterInfo &MRI,
                        const TargetRegisterInfo &TRI,
                        const RegisterBankInfo &RBI) {
  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);

  // Make sure the size of the source and dest line up.
  assert(
      (DstSize == SrcSize ||
       // Copies are a mean to setup initial types, the number of
       // bits may not exactly match.
       (TargetRegisterInfo::isPhysicalRegister(SrcReg) && DstSize <= SrcSize) ||
       // Copies are a mean to copy bits around, as long as we are
       // on the same register class, that's fine. Otherwise, that
       // means we need some SUBREG_TO_REG or AND & co.
       (((DstSize + 31) / 32 == (SrcSize + 31) / 32) && DstSize > SrcSize)) &&
      "Copy with different width?!");

  // Check the size of the destination.
  assert((DstSize <= 64 || DstBank.getID() == AArch64::FPRRegBankID) &&
         "GPRs cannot get more than 64-bit width values");

  return true;
}
#endif

/// Helper function for selectCopy. Inserts a subregister copy from
/// \p *From to \p *To, linking it up to \p I.
///
/// e.g, given I = "Dst = COPY SrcReg", we'll transform that into
///
/// CopyReg (From class) = COPY SrcReg
/// SubRegCopy (To class) = COPY CopyReg:SubReg
/// Dst = COPY SubRegCopy
static bool selectSubregisterCopy(MachineInstr &I, MachineRegisterInfo &MRI,
                                  const RegisterBankInfo &RBI, unsigned SrcReg,
                                  const TargetRegisterClass *From,
                                  const TargetRegisterClass *To,
                                  unsigned SubReg) {
  MachineIRBuilder MIB(I);
  auto Copy = MIB.buildCopy({From}, {SrcReg});
  auto SubRegCopy = MIB.buildInstr(TargetOpcode::COPY, {To}, {})
                        .addReg(Copy.getReg(0), 0, SubReg);
  MachineOperand &RegOp = I.getOperand(1);
  RegOp.setReg(SubRegCopy.getReg(0));

  // It's possible that the destination register won't be constrained. Make
  // sure that happens.
  if (!TargetRegisterInfo::isPhysicalRegister(I.getOperand(0).getReg()))
    RBI.constrainGenericRegister(I.getOperand(0).getReg(), *To, MRI);

  return true;
}

static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg = I.getOperand(1).getReg();
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
  const TargetRegisterClass *DstRC = getMinClassForRegBank(
      DstRegBank, RBI.getSizeInBits(DstReg, MRI, TRI), true);
  if (!DstRC) {
    LLVM_DEBUG(dbgs() << "Unexpected dest size "
                      << RBI.getSizeInBits(DstReg, MRI, TRI) << '\n');
    return false;
  }

  // A couple helpers below, for making sure that the copy we produce is valid.

  // Set to true if we insert a SUBREG_TO_REG. If we do this, then we don't want
  // to verify that the src and dst are the same size, since that's handled by
  // the SUBREG_TO_REG.
  bool KnownValid = false;

  // Returns true, or asserts if something we don't expect happens. Instead of
  // returning true, we return isValidCopy() to ensure that we verify the
  // result.
  auto CheckCopy = [&]() {
    // If we have a bitcast or something, we can't have physical registers.
    assert(
        (I.isCopy() ||
         (!TargetRegisterInfo::isPhysicalRegister(I.getOperand(0).getReg()) &&
          !TargetRegisterInfo::isPhysicalRegister(I.getOperand(1).getReg()))) &&
        "No phys reg on generic operator!");
    assert(KnownValid || isValidCopy(I, DstRegBank, MRI, TRI, RBI));
    (void)KnownValid;
    return true;
  };

  // Is this a copy? If so, then we may need to insert a subregister copy, or
  // a SUBREG_TO_REG.
  if (I.isCopy()) {
    // Yes. Check if there's anything to fix up.
    const TargetRegisterClass *SrcRC = getMinClassForRegBank(
        SrcRegBank, RBI.getSizeInBits(SrcReg, MRI, TRI), true);
    if (!SrcRC) {
      LLVM_DEBUG(dbgs() << "Couldn't determine source register class\n");
      return false;
    }

    // Is this a cross-bank copy?
    if (DstRegBank.getID() != SrcRegBank.getID()) {
      // If we're doing a cross-bank copy on different-sized registers, we need
      // to do a bit more work.
      unsigned SrcSize = TRI.getRegSizeInBits(*SrcRC);
      unsigned DstSize = TRI.getRegSizeInBits(*DstRC);

      if (SrcSize > DstSize) {
        // We're doing a cross-bank copy into a smaller register. We need a
        // subregister copy. First, get a register class that's on the same bank
        // as the destination, but the same size as the source.
        const TargetRegisterClass *SubregRC =
            getMinClassForRegBank(DstRegBank, SrcSize, true);
        assert(SubregRC && "Didn't get a register class for subreg?");

        // Get the appropriate subregister for the destination.
        unsigned SubReg = 0;
        if (!getSubRegForClass(DstRC, TRI, SubReg)) {
          LLVM_DEBUG(dbgs() << "Couldn't determine subregister for copy.\n");
          return false;
        }

        // Now, insert a subregister copy using the new register class.
        selectSubregisterCopy(I, MRI, RBI, SrcReg, SubregRC, DstRC, SubReg);
        return CheckCopy();
      }

      else if (DstRegBank.getID() == AArch64::GPRRegBankID && DstSize == 32 &&
               SrcSize == 16) {
        // Special case for FPR16 to GPR32.
        // FIXME: This can probably be generalized like the above case.
        unsigned PromoteReg =
            MRI.createVirtualRegister(&AArch64::FPR32RegClass);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(AArch64::SUBREG_TO_REG), PromoteReg)
            .addImm(0)
            .addUse(SrcReg)
            .addImm(AArch64::hsub);
        MachineOperand &RegOp = I.getOperand(1);
        RegOp.setReg(PromoteReg);

        // Promise that the copy is implicitly validated by the SUBREG_TO_REG.
        KnownValid = true;
      }
    }

    // If the destination is a physical register, then there's nothing to
    // change, so we're done.
    if (TargetRegisterInfo::isPhysicalRegister(DstReg))
      return CheckCopy();
  }

  // No need to constrain SrcReg. It will get constrained when we hit another
  // of its use or its defs. Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(AArch64::COPY));
  return CheckCopy();
}

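/// Select the AArch64 opcode for an FP <-> integer conversion (G_SITOFP,
/// G_UITOFP, G_FPTOSI, G_FPTOUI) given the scalar destination and source
/// types. \returns \p GenericOpc if the combination is unsupported.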
static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy) {
  if (!DstTy.isScalar() || !SrcTy.isScalar())
    return GenericOpc;

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();

  switch (DstSize) {
  case 32:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  case 64:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  default:
    return GenericOpc;
  };
  return GenericOpc;
}

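/// Map an IR integer-compare predicate to the corresponding AArch64 condition
/// code.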
static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return AArch64CC::NE;
  case CmpInst::ICMP_EQ:
    return AArch64CC::EQ;
  case CmpInst::ICMP_SGT:
    return AArch64CC::GT;
  case CmpInst::ICMP_SGE:
    return AArch64CC::GE;
  case CmpInst::ICMP_SLT:
    return AArch64CC::LT;
  case CmpInst::ICMP_SLE:
    return AArch64CC::LE;
  case CmpInst::ICMP_UGT:
    return AArch64CC::HI;
  case CmpInst::ICMP_UGE:
    return AArch64CC::HS;
  case CmpInst::ICMP_ULT:
    return AArch64CC::LO;
  case CmpInst::ICMP_ULE:
    return AArch64CC::LS;
  }
}

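/// Map an IR floating-point-compare predicate to one or two AArch64 condition
/// codes. Predicates that cannot be expressed with a single condition (e.g.
/// FCMP_ONE, FCMP_UEQ) also set \p CondCode2; otherwise it is left as AL.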
static void changeFCMPPredToAArch64CC(CmpInst::Predicate P,
                                      AArch64CC::CondCode &CondCode,
                                      AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (P) {
  default:
    llvm_unreachable("Unknown FP condition!");
  case CmpInst::FCMP_OEQ:
    CondCode = AArch64CC::EQ;
    break;
  case CmpInst::FCMP_OGT:
    CondCode = AArch64CC::GT;
    break;
  case CmpInst::FCMP_OGE:
    CondCode = AArch64CC::GE;
    break;
  case CmpInst::FCMP_OLT:
    CondCode = AArch64CC::MI;
    break;
  case CmpInst::FCMP_OLE:
    CondCode = AArch64CC::LS;
    break;
  case CmpInst::FCMP_ONE:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GT;
    break;
  case CmpInst::FCMP_ORD:
    CondCode = AArch64CC::VC;
    break;
  case CmpInst::FCMP_UNO:
    CondCode = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UEQ:
    CondCode = AArch64CC::EQ;
    CondCode2 = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UGT:
    CondCode = AArch64CC::HI;
    break;
  case CmpInst::FCMP_UGE:
    CondCode = AArch64CC::PL;
    break;
  case CmpInst::FCMP_ULT:
    CondCode = AArch64CC::LT;
    break;
  case CmpInst::FCMP_ULE:
    CondCode = AArch64CC::LE;
    break;
  case CmpInst::FCMP_UNE:
    CondCode = AArch64CC::NE;
    break;
  }
}

bool AArch64InstructionSelector::selectCompareBranch(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
  MachineInstr *CCMI = MRI.getVRegDef(CondReg);
  if (CCMI->getOpcode() == TargetOpcode::G_TRUNC)
    CCMI = MRI.getVRegDef(CCMI->getOperand(1).getReg());
  if (CCMI->getOpcode() != TargetOpcode::G_ICMP)
    return false;

  unsigned LHS = CCMI->getOperand(2).getReg();
  unsigned RHS = CCMI->getOperand(3).getReg();
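  // The constant may sit on either side of the compare; if RHS is not a
  // constant, swap the operands so that a foldable zero ends up in RHS.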
  if (!getConstantVRegVal(RHS, MRI))
    std::swap(RHS, LHS);

  const auto RHSImm = getConstantVRegVal(RHS, MRI);
  if (!RHSImm || *RHSImm != 0)
    return false;

  const RegisterBank &RB = *RBI.getRegBank(LHS, MRI, TRI);
  if (RB.getID() != AArch64::GPRRegBankID)
    return false;

  const auto Pred = (CmpInst::Predicate)CCMI->getOperand(1).getPredicate();
  if (Pred != CmpInst::ICMP_NE && Pred != CmpInst::ICMP_EQ)
    return false;

  const unsigned CmpWidth = MRI.getType(LHS).getSizeInBits();
  unsigned CBOpc = 0;
  if (CmpWidth <= 32)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZW : AArch64::CBNZW);
  else if (CmpWidth == 64)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZX : AArch64::CBNZX);
  else
    return false;

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CBOpc))
      .addUse(LHS)
      .addMBB(DestMBB)
      .constrainAllUses(TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

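// Vector G_SHL is selected to USHL, which takes its per-lane shift amounts
// from a vector register rather than from an immediate.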
bool AArch64InstructionSelector::selectVectorSHL(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert(I.getOpcode() == TargetOpcode::G_SHL);
  unsigned DstReg = I.getOperand(0).getReg();
  const LLT Ty = MRI.getType(DstReg);
  unsigned Src1Reg = I.getOperand(1).getReg();
  unsigned Src2Reg = I.getOperand(2).getReg();

  if (!Ty.isVector())
    return false;

  unsigned Opc = 0;
  if (Ty == LLT::vector(4, 32)) {
    Opc = AArch64::USHLv4i32;
  } else if (Ty == LLT::vector(2, 32)) {
    Opc = AArch64::USHLv2i32;
  } else {
    LLVM_DEBUG(dbgs() << "Unhandled G_SHL type");
    return false;
  }

  MachineIRBuilder MIB(I);
  auto UShl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg, Src2Reg});
  constrainSelectedInstRegOperands(*UShl, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::selectVectorASHR(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert(I.getOpcode() == TargetOpcode::G_ASHR);
  unsigned DstReg = I.getOperand(0).getReg();
  const LLT Ty = MRI.getType(DstReg);
  unsigned Src1Reg = I.getOperand(1).getReg();
  unsigned Src2Reg = I.getOperand(2).getReg();

  if (!Ty.isVector())
    return false;

  // There is not a shift right register instruction, but the shift left
  // register instruction takes a signed value, where negative numbers specify a
  // right shift.

  unsigned Opc = 0;
  unsigned NegOpc = 0;
  const TargetRegisterClass *RC = nullptr;
  if (Ty == LLT::vector(4, 32)) {
    Opc = AArch64::SSHLv4i32;
    NegOpc = AArch64::NEGv4i32;
    RC = &AArch64::FPR128RegClass;
  } else if (Ty == LLT::vector(2, 32)) {
    Opc = AArch64::SSHLv2i32;
    NegOpc = AArch64::NEGv2i32;
    RC = &AArch64::FPR64RegClass;
  } else {
    LLVM_DEBUG(dbgs() << "Unhandled G_ASHR type");
    return false;
  }

  MachineIRBuilder MIB(I);
  auto Neg = MIB.buildInstr(NegOpc, {RC}, {Src2Reg});
  constrainSelectedInstRegOperands(*Neg, TII, TRI, RBI);
  auto SShl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg, Neg});
  constrainSelectedInstRegOperands(*SShl, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::selectVaStartAAPCS(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  return false;
}

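// va_start on Darwin: compute the address of the first variadic stack slot
// with an ADDXri off the vararg frame index and store it through the va_list
// pointer supplied in operand 0.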
bool AArch64InstructionSelector::selectVaStartDarwin(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  unsigned ListReg = I.getOperand(0).getReg();

  unsigned ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);

  auto MIB =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
          .addDef(ArgsAddrReg)
          .addFrameIndex(FuncInfo->getVarArgsStackIndex())
          .addImm(0)
          .addImm(0);

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);

  MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui))
            .addUse(ArgsAddrReg)
            .addUse(ListReg)
            .addImm(0)
            .addMemOperand(*I.memoperands_begin());

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

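// Builds the value with a MOVZ of bits 0-15 followed by MOVKs for bits 16-31,
// 32-47 and 48-63, as required for symbol references under the large code
// model.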
void AArch64InstructionSelector::materializeLargeCMVal(
    MachineInstr &I, const Value *V, unsigned char OpFlags) const {
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineIRBuilder MIB(I);

  auto MovZ = MIB.buildInstr(AArch64::MOVZXi, {&AArch64::GPR64RegClass}, {});
  MovZ->addOperand(MF, I.getOperand(1));
  MovZ->getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_G0 |
                                     AArch64II::MO_NC);
  MovZ->addOperand(MF, MachineOperand::CreateImm(0));
  constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);

  auto BuildMovK = [&](unsigned SrcReg, unsigned char Flags, unsigned Offset,
                       unsigned ForceDstReg) {
    unsigned DstReg = ForceDstReg
                          ? ForceDstReg
                          : MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    auto MovI = MIB.buildInstr(AArch64::MOVKXi).addDef(DstReg).addUse(SrcReg);
    if (auto *GV = dyn_cast<GlobalValue>(V)) {
      MovI->addOperand(MF, MachineOperand::CreateGA(
                               GV, MovZ->getOperand(1).getOffset(), Flags));
    } else {
      MovI->addOperand(
          MF, MachineOperand::CreateBA(cast<BlockAddress>(V),
                                       MovZ->getOperand(1).getOffset(), Flags));
    }
    MovI->addOperand(MF, MachineOperand::CreateImm(Offset));
    constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI);
    return DstReg;
  };
  unsigned DstReg = BuildMovK(MovZ.getReg(0),
                              AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0);
  DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0);
  BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg());
  return;
}

973 CodeGenCoverage &CoverageInfo) const {
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000974 assert(I.getParent() && "Instruction should be in a basic block!");
975 assert(I.getParent()->getParent() && "Instruction should be in a function!");
976
977 MachineBasicBlock &MBB = *I.getParent();
978 MachineFunction &MF = *MBB.getParent();
979 MachineRegisterInfo &MRI = MF.getRegInfo();
980
Tim Northovercdf23f12016-10-31 18:30:59 +0000981 unsigned Opcode = I.getOpcode();
Aditya Nandakumarefd8a842017-08-23 20:45:48 +0000982 // G_PHI requires same handling as PHI
983 if (!isPreISelGenericOpcode(Opcode) || Opcode == TargetOpcode::G_PHI) {
Tim Northovercdf23f12016-10-31 18:30:59 +0000984 // Certain non-generic instructions also need some special handling.
985
986 if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
987 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
Tim Northover7d88da62016-11-08 00:34:06 +0000988
Aditya Nandakumarefd8a842017-08-23 20:45:48 +0000989 if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) {
Tim Northover7d88da62016-11-08 00:34:06 +0000990 const unsigned DefReg = I.getOperand(0).getReg();
991 const LLT DefTy = MRI.getType(DefReg);
992
993 const TargetRegisterClass *DefRC = nullptr;
994 if (TargetRegisterInfo::isPhysicalRegister(DefReg)) {
995 DefRC = TRI.getRegClass(DefReg);
996 } else {
997 const RegClassOrRegBank &RegClassOrBank =
998 MRI.getRegClassOrRegBank(DefReg);
999
1000 DefRC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
1001 if (!DefRC) {
1002 if (!DefTy.isValid()) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001003 LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
Tim Northover7d88da62016-11-08 00:34:06 +00001004 return false;
1005 }
1006 const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
1007 DefRC = getRegClassForTypeOnBank(DefTy, RB, RBI);
1008 if (!DefRC) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001009 LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
Tim Northover7d88da62016-11-08 00:34:06 +00001010 return false;
1011 }
1012 }
1013 }
Aditya Nandakumarefd8a842017-08-23 20:45:48 +00001014 I.setDesc(TII.get(TargetOpcode::PHI));
Tim Northover7d88da62016-11-08 00:34:06 +00001015
1016 return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
1017 }
1018
1019 if (I.isCopy())
Tim Northovercdf23f12016-10-31 18:30:59 +00001020 return selectCopy(I, TII, MRI, TRI, RBI);
Tim Northover7d88da62016-11-08 00:34:06 +00001021
1022 return true;
Tim Northovercdf23f12016-10-31 18:30:59 +00001023 }
1024
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001025
1026 if (I.getNumOperands() != I.getNumExplicitOperands()) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001027 LLVM_DEBUG(
1028 dbgs() << "Generic instruction has unexpected implicit operands\n");
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001029 return false;
1030 }
1031
Daniel Sandersf76f3152017-11-16 00:46:35 +00001032 if (selectImpl(I, CoverageInfo))
Ahmed Bougacha36f70352016-12-21 23:26:20 +00001033 return true;
1034
Tim Northover32a078a2016-09-15 10:09:59 +00001035 LLT Ty =
1036 I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{};
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001037
Amara Emerson3739a202019-03-15 21:59:50 +00001038 MachineIRBuilder MIB(I);
1039
  switch (Opcode) {
  case TargetOpcode::G_BRCOND: {
    if (Ty.getSizeInBits() > 32) {
      // We shouldn't need this on AArch64, but it would be implemented as an
      // EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the
      // bit being tested is < 32.
      LLVM_DEBUG(dbgs() << "G_BRCOND has type: " << Ty
                        << ", expected at most 32-bits");
      return false;
    }

    const unsigned CondReg = I.getOperand(0).getReg();
    MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

    // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z
    // instructions will not be produced, as they are conditional branch
    // instructions that do not set flags.
    bool ProduceNonFlagSettingCondBr =
        !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);
    if (ProduceNonFlagSettingCondBr && selectCompareBranch(I, MF, MRI))
      return true;

    if (ProduceNonFlagSettingCondBr) {
      auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
                     .addUse(CondReg)
                     .addImm(/*bit offset=*/0)
                     .addMBB(DestMBB);

      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
    } else {
      auto CMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
                     .addDef(AArch64::WZR)
                     .addUse(CondReg)
                     .addImm(1);
      constrainSelectedInstRegOperands(*CMP.getInstr(), TII, TRI, RBI);
      auto Bcc =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::Bcc))
              .addImm(AArch64CC::EQ)
              .addMBB(DestMBB);

      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*Bcc.getInstr(), TII, TRI, RBI);
    }
  }

  case TargetOpcode::G_BRINDIRECT: {
    I.setDesc(TII.get(AArch64::BR));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_BSWAP: {
    // Handle vector types for G_BSWAP directly.
    unsigned DstReg = I.getOperand(0).getReg();
    LLT DstTy = MRI.getType(DstReg);

    // We should only get vector types here; everything else is handled by the
    // importer right now.
    if (!DstTy.isVector() || DstTy.getSizeInBits() > 128) {
      LLVM_DEBUG(dbgs() << "Dst type for G_BSWAP currently unsupported.\n");
      return false;
    }

    // Only handle 4 and 2 element vectors for now.
    // TODO: 16-bit elements.
    unsigned NumElts = DstTy.getNumElements();
    if (NumElts != 4 && NumElts != 2) {
      LLVM_DEBUG(dbgs() << "Unsupported number of elements for G_BSWAP.\n");
      return false;
    }

    // Choose the correct opcode for the supported types. Right now, that's
    // v2s32, v4s32, and v2s64.
    unsigned Opc = 0;
    unsigned EltSize = DstTy.getElementType().getSizeInBits();
    if (EltSize == 32)
      Opc = (DstTy.getNumElements() == 2) ? AArch64::REV32v8i8
                                          : AArch64::REV32v16i8;
    else if (EltSize == 64)
      Opc = AArch64::REV64v16i8;

    // We should always get something by the time we get here...
    assert(Opc != 0 && "Didn't get an opcode for G_BSWAP?");

    I.setDesc(TII.get(Opc));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_CONSTANT: {
    const bool isFP = Opcode == TargetOpcode::G_FCONSTANT;

    const LLT s32 = LLT::scalar(32);
    const LLT s64 = LLT::scalar(64);
    const LLT p0 = LLT::pointer(0, 64);

    const unsigned DefReg = I.getOperand(0).getReg();
    const LLT DefTy = MRI.getType(DefReg);
    const unsigned DefSize = DefTy.getSizeInBits();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    // FIXME: Redundant check, but even less readable when factored out.
    if (isFP) {
      if (Ty != s32 && Ty != s64) {
        LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
                          << " constant, expected: " << s32 << " or " << s64
                          << '\n');
        return false;
      }

      if (RB.getID() != AArch64::FPRRegBankID) {
        LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
                          << " constant on bank: " << RB
                          << ", expected: FPR\n");
        return false;
      }

      // The case when we have 0.0 is covered by tablegen. Reject it here so we
      // can be sure tablegen works correctly and isn't rescued by this code.
      if (I.getOperand(1).getFPImm()->getValueAPF().isExactlyValue(0.0))
        return false;
    } else {
      // s32 and s64 are covered by tablegen.
      if (Ty != p0) {
        LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
                          << " constant, expected: " << s32 << ", " << s64
                          << ", or " << p0 << '\n');
        return false;
      }

      if (RB.getID() != AArch64::GPRRegBankID) {
        LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
                          << " constant on bank: " << RB
                          << ", expected: GPR\n");
        return false;
      }
    }

    const unsigned MovOpc =
        DefSize == 32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;

    I.setDesc(TII.get(MovOpc));

    if (isFP) {
      const TargetRegisterClass &GPRRC =
          DefSize == 32 ? AArch64::GPR32RegClass : AArch64::GPR64RegClass;
      const TargetRegisterClass &FPRRC =
          DefSize == 32 ? AArch64::FPR32RegClass : AArch64::FPR64RegClass;

      const unsigned DefGPRReg = MRI.createVirtualRegister(&GPRRC);
      MachineOperand &RegOp = I.getOperand(0);
      RegOp.setReg(DefGPRReg);
      MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
      MIB.buildCopy({DefReg}, {DefGPRReg});

      if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
        LLVM_DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
        return false;
      }

      MachineOperand &ImmOp = I.getOperand(1);
      // FIXME: Is going through int64_t always correct?
      ImmOp.ChangeToImmediate(
          ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
    } else if (I.getOperand(1).isCImm()) {
      uint64_t Val = I.getOperand(1).getCImm()->getZExtValue();
      I.getOperand(1).ChangeToImmediate(Val);
    } else if (I.getOperand(1).isImm()) {
      uint64_t Val = I.getOperand(1).getImm();
      I.getOperand(1).ChangeToImmediate(Val);
    }

    constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    return true;
  }
  case TargetOpcode::G_EXTRACT: {
    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    (void)DstTy;
    unsigned SrcSize = SrcTy.getSizeInBits();
    // Larger extracts are vectors, same-size extracts should be something else
    // by now (either split up or simplified to a COPY).
    if (SrcTy.getSizeInBits() > 64 || Ty.getSizeInBits() > 32)
      return false;

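    // Reuse G_EXTRACT's operands for a UBFM: the existing offset operand is
    // the low bit of the field, and the immediate appended below is the high
    // bit (offset + width - 1).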
    I.setDesc(TII.get(SrcSize == 64 ? AArch64::UBFMXri : AArch64::UBFMWri));
    MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() +
                                      Ty.getSizeInBits() - 1);

    if (SrcSize < 64) {
      assert(SrcSize == 32 && DstTy.getSizeInBits() == 16 &&
             "unexpected G_EXTRACT types");
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
    MIB.buildInstr(TargetOpcode::COPY, {I.getOperand(0).getReg()}, {})
        .addReg(DstReg, 0, AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(0).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(0).setReg(DstReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_INSERT: {
    LLT SrcTy = MRI.getType(I.getOperand(2).getReg());
    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    unsigned DstSize = DstTy.getSizeInBits();
    // Larger inserts are vectors, same-size ones should be something else by
    // now (split up or turned into COPYs).
    if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32)
      return false;

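    // Encode as a bitfield move: immr = (regsize - lsb) % regsize rotates the
    // source into position at bit LSB, and imms (appended below) is the field
    // width - 1.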
Amara Emersonbc03bae2018-02-18 17:03:02 +00001255 I.setDesc(TII.get(DstSize == 64 ? AArch64::BFMXri : AArch64::BFMWri));
Tim Northover7b6d66c2017-07-20 22:58:38 +00001256 unsigned LSB = I.getOperand(3).getImm();
1257 unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
Amara Emersonbc03bae2018-02-18 17:03:02 +00001258 I.getOperand(3).setImm((DstSize - LSB) % DstSize);
Tim Northover7b6d66c2017-07-20 22:58:38 +00001259 MachineInstrBuilder(MF, I).addImm(Width - 1);
1260
Amara Emersonbc03bae2018-02-18 17:03:02 +00001261 if (DstSize < 64) {
1262 assert(DstSize == 32 && SrcTy.getSizeInBits() == 16 &&
1263 "unexpected G_INSERT types");
1264 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1265 }
1266
Tim Northover7b6d66c2017-07-20 22:58:38 +00001267 unsigned SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
1268 BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
1269 TII.get(AArch64::SUBREG_TO_REG))
1270 .addDef(SrcReg)
1271 .addImm(0)
1272 .addUse(I.getOperand(2).getReg())
1273 .addImm(AArch64::sub_32);
1274 RBI.constrainGenericRegister(I.getOperand(2).getReg(),
1275 AArch64::GPR32RegClass, MRI);
1276 I.getOperand(2).setReg(SrcReg);
1277
1278 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1279 }
Ahmed Bougacha0306b5e2016-08-16 14:02:42 +00001280 case TargetOpcode::G_FRAME_INDEX: {
1281 // allocas and G_FRAME_INDEX are only supported in addrspace(0).
Tim Northover5ae83502016-09-15 09:20:34 +00001282 if (Ty != LLT::pointer(0, 64)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001283 LLVM_DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
1284 << ", expected: " << LLT::pointer(0, 64) << '\n');
Ahmed Bougacha0306b5e2016-08-16 14:02:42 +00001285 return false;
1286 }
Ahmed Bougacha0306b5e2016-08-16 14:02:42 +00001287 I.setDesc(TII.get(AArch64::ADDXri));
Ahmed Bougacha0306b5e2016-08-16 14:02:42 +00001288
1289 // MOs for a #0 shifted immediate.
1290 I.addOperand(MachineOperand::CreateImm(0));
1291 I.addOperand(MachineOperand::CreateImm(0));
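    // The result is ADDXri %dst, %frame.index, 0, 0; the frame index operand
    // is rewritten to the real base register and offset during frame index
    // elimination.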
1292
1293 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1294 }
Tim Northoverbdf16242016-10-10 21:50:00 +00001295
1296 case TargetOpcode::G_GLOBAL_VALUE: {
1297 auto GV = I.getOperand(1).getGlobal();
1298 if (GV->isThreadLocal()) {
1299 // FIXME: we don't support TLS yet.
1300 return false;
1301 }
1302 unsigned char OpFlags = STI.ClassifyGlobalReference(GV, TM);
Tim Northoverfe7c59a2016-12-13 18:25:38 +00001303 if (OpFlags & AArch64II::MO_GOT) {
Tim Northoverbdf16242016-10-10 21:50:00 +00001304 I.setDesc(TII.get(AArch64::LOADgot));
Tim Northoverfe7c59a2016-12-13 18:25:38 +00001305 I.getOperand(1).setTargetFlags(OpFlags);
Amara Emersond5785772018-01-18 19:21:27 +00001306 } else if (TM.getCodeModel() == CodeModel::Large) {
1307 // Materialize the global using movz/movk instructions.
Amara Emerson1e8c1642018-07-31 00:09:02 +00001308 materializeLargeCMVal(I, GV, OpFlags);
Amara Emersond5785772018-01-18 19:21:27 +00001309 I.eraseFromParent();
1310 return true;
David Green9dd1d452018-08-22 11:31:39 +00001311 } else if (TM.getCodeModel() == CodeModel::Tiny) {
1312 I.setDesc(TII.get(AArch64::ADR));
1313 I.getOperand(1).setTargetFlags(OpFlags);
Tim Northoverfe7c59a2016-12-13 18:25:38 +00001314 } else {
Tim Northoverbdf16242016-10-10 21:50:00 +00001315 I.setDesc(TII.get(AArch64::MOVaddr));
1316 I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
1317 MachineInstrBuilder MIB(MF, I);
1318 MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(),
1319 OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
1320 }
1321 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1322 }
1323
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001324 case TargetOpcode::G_LOAD:
1325 case TargetOpcode::G_STORE: {
Tim Northover0f140c72016-09-09 11:46:34 +00001326 LLT PtrTy = MRI.getType(I.getOperand(1).getReg());
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001327
Tim Northover5ae83502016-09-15 09:20:34 +00001328 if (PtrTy != LLT::pointer(0, 64)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001329 LLVM_DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
1330 << ", expected: " << LLT::pointer(0, 64) << '\n');
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001331 return false;
1332 }
1333
Daniel Sanders3c1c4c02017-12-05 05:52:07 +00001334 auto &MemOp = **I.memoperands_begin();
1335 if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001336 LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
Daniel Sanders3c1c4c02017-12-05 05:52:07 +00001337 return false;
1338 }
Daniel Sandersf84bc372018-05-05 20:53:24 +00001339 unsigned MemSizeInBits = MemOp.getSize() * 8;
Daniel Sanders3c1c4c02017-12-05 05:52:07 +00001340
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001341 const unsigned PtrReg = I.getOperand(1).getReg();
Ahmed Bougachaf0b22c42017-03-27 18:14:20 +00001342#ifndef NDEBUG
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001343 const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
Ahmed Bougachaf0b22c42017-03-27 18:14:20 +00001344 // Sanity-check the pointer register.
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001345 assert(PtrRB.getID() == AArch64::GPRRegBankID &&
1346 "Load/Store pointer operand isn't a GPR");
Tim Northover0f140c72016-09-09 11:46:34 +00001347 assert(MRI.getType(PtrReg).isPointer() &&
1348 "Load/Store pointer operand isn't a pointer");
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001349#endif
1350
1351 const unsigned ValReg = I.getOperand(0).getReg();
1352 const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);
1353
1354 const unsigned NewOpc =
Daniel Sandersf84bc372018-05-05 20:53:24 +00001355 selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemSizeInBits);
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001356 if (NewOpc == I.getOpcode())
1357 return false;
1358
1359 I.setDesc(TII.get(NewOpc));
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001360
Ahmed Bougacha8a654082017-03-27 17:31:52 +00001361 uint64_t Offset = 0;
1362 auto *PtrMI = MRI.getVRegDef(PtrReg);
1363
1364 // Try to fold a GEP into our unsigned immediate addressing mode.
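    // For example, a 64-bit load whose pointer is a G_GEP with constant
    // offset 32 becomes LDRXui %base, 4: the byte offset must be positive,
    // a multiple of the access size, and below 4096 * size, and it is
    // stored scaled (32 / 8 = 4).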
1365 if (PtrMI->getOpcode() == TargetOpcode::G_GEP) {
1366 if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) {
1367 int64_t Imm = *COff;
Daniel Sandersf84bc372018-05-05 20:53:24 +00001368 const unsigned Size = MemSizeInBits / 8;
Ahmed Bougacha8a654082017-03-27 17:31:52 +00001369 const unsigned Scale = Log2_32(Size);
1370 if ((Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)) {
1371 unsigned Ptr2Reg = PtrMI->getOperand(1).getReg();
1372 I.getOperand(1).setReg(Ptr2Reg);
1373 PtrMI = MRI.getVRegDef(Ptr2Reg);
1374 Offset = Imm / Size;
1375 }
1376 }
1377 }
1378
Ahmed Bougachaf75782f2017-03-27 17:31:56 +00001379 // If we haven't folded anything into our addressing mode yet, try to fold
1380 // a frame index into the base+offset.
1381 if (!Offset && PtrMI->getOpcode() == TargetOpcode::G_FRAME_INDEX)
1382 I.getOperand(1).ChangeToFrameIndex(PtrMI->getOperand(1).getIndex());
1383
Ahmed Bougacha8a654082017-03-27 17:31:52 +00001384 I.addOperand(MachineOperand::CreateImm(Offset));
Ahmed Bougacha85a66a62017-03-27 17:31:48 +00001385
1386 // If we're storing a 0, use WZR/XZR.
1387 if (auto CVal = getConstantVRegVal(ValReg, MRI)) {
1388 if (*CVal == 0 && Opcode == TargetOpcode::G_STORE) {
1389 if (I.getOpcode() == AArch64::STRWui)
1390 I.getOperand(0).setReg(AArch64::WZR);
1391 else if (I.getOpcode() == AArch64::STRXui)
1392 I.getOperand(0).setReg(AArch64::XZR);
1393 }
1394 }
1395
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001396 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1397 }
1398
Tim Northover9dd78f82017-02-08 21:22:25 +00001399 case TargetOpcode::G_SMULH:
1400 case TargetOpcode::G_UMULH: {
1401 // Reject the various things we don't support yet.
1402 if (unsupportedBinOp(I, RBI, MRI, TRI))
1403 return false;
1404
1405 const unsigned DefReg = I.getOperand(0).getReg();
1406 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
1407
1408 if (RB.getID() != AArch64::GPRRegBankID) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001409 LLVM_DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
Tim Northover9dd78f82017-02-08 21:22:25 +00001410 return false;
1411 }
1412
1413 if (Ty != LLT::scalar(64)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001414 LLVM_DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
1415 << ", expected: " << LLT::scalar(64) << '\n');
Tim Northover9dd78f82017-02-08 21:22:25 +00001416 return false;
1417 }
1418
1419 unsigned NewOpc = I.getOpcode() == TargetOpcode::G_SMULH ? AArch64::SMULHrr
1420 : AArch64::UMULHrr;
1421 I.setDesc(TII.get(NewOpc));
1422
1423 // Now that we selected an opcode, we need to constrain the register
1424 // operands to use appropriate classes.
1425 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1426 }
Ahmed Bougacha33e19fe2016-08-18 16:05:11 +00001427 case TargetOpcode::G_FADD:
1428 case TargetOpcode::G_FSUB:
1429 case TargetOpcode::G_FMUL:
1430 case TargetOpcode::G_FDIV:
1431
Ahmed Bougacha2ac5bf92016-08-16 14:02:47 +00001432 case TargetOpcode::G_ASHR:
Amara Emerson9bf092d2019-04-09 21:22:43 +00001433 if (MRI.getType(I.getOperand(0).getReg()).isVector())
1434 return selectVectorASHR(I, MRI);
1435 LLVM_FALLTHROUGH;
1436 case TargetOpcode::G_SHL:
1437 if (Opcode == TargetOpcode::G_SHL &&
1438 MRI.getType(I.getOperand(0).getReg()).isVector())
1439 return selectVectorSHL(I, MRI);
1440 LLVM_FALLTHROUGH;
1441 case TargetOpcode::G_OR:
1442 case TargetOpcode::G_LSHR:
Tim Northover2fda4b02016-10-10 21:49:49 +00001443 case TargetOpcode::G_GEP: {
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001444 // Reject the various things we don't support yet.
Ahmed Bougacha59e160a2016-08-16 14:37:40 +00001445 if (unsupportedBinOp(I, RBI, MRI, TRI))
1446 return false;
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001447
Ahmed Bougacha59e160a2016-08-16 14:37:40 +00001448 const unsigned OpSize = Ty.getSizeInBits();
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001449
1450 const unsigned DefReg = I.getOperand(0).getReg();
1451 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
1452
1453 const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
1454 if (NewOpc == I.getOpcode())
1455 return false;
1456
1457 I.setDesc(TII.get(NewOpc));
1458 // FIXME: Should the type always be reset in setDesc?
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001459
1460 // Now that we selected an opcode, we need to constrain the register
1461 // operands to use appropriate classes.
1462 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1463 }
Tim Northover3d38b3a2016-10-11 20:50:21 +00001464
Jessica Paquette7d6784f2019-03-14 22:54:29 +00001465 case TargetOpcode::G_UADDO: {
1466 // TODO: Support other types.
1467 unsigned OpSize = Ty.getSizeInBits();
1468 if (OpSize != 32 && OpSize != 64) {
1469 LLVM_DEBUG(
1470 dbgs()
1471 << "G_UADDO currently only supported for 32 and 64 b types.\n");
1472 return false;
1473 }
1474
1475 // TODO: Support vectors.
1476 if (Ty.isVector()) {
1477 LLVM_DEBUG(dbgs() << "G_UADDO currently only supported for scalars.\n");
1478 return false;
1479 }
1480
1481 // Emit an add that also sets the condition flags (i.e. ADDS).
1482 unsigned AddsOpc = OpSize == 32 ? AArch64::ADDSWrr : AArch64::ADDSXrr;
1483 MachineIRBuilder MIRBuilder(I);
1484 auto AddsMI = MIRBuilder.buildInstr(
1485 AddsOpc, {I.getOperand(0).getReg()},
1486 {I.getOperand(2).getReg(), I.getOperand(3).getReg()});
1487 constrainSelectedInstRegOperands(*AddsMI, TII, TRI, RBI);
1488
1489 // Now, put the overflow result in the register given by the first operand
1490 // to the G_UADDO. CSINC increments the result when the predicate is false,
1491 // so to get the increment when it's true, we need to use the inverse. In
1492 // this case, we want to increment when carry is set.
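    // Concretely: CSINC %ovf, wzr, wzr, lo yields 1 exactly when the carry
    // flag was set by the ADDS above (unsigned overflow) and 0 otherwise.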
1493 auto CsetMI = MIRBuilder
1494 .buildInstr(AArch64::CSINCWr, {I.getOperand(1).getReg()},
1495 {AArch64::WZR, AArch64::WZR})
1496 .addImm(getInvertedCondCode(AArch64CC::HS));
1497 constrainSelectedInstRegOperands(*CsetMI, TII, TRI, RBI);
1498 I.eraseFromParent();
1499 return true;
1500 }
1501
Tim Northover398c5f52017-02-14 20:56:29 +00001502 case TargetOpcode::G_PTR_MASK: {
1503 uint64_t Align = I.getOperand(2).getImm();
1504 if (Align >= 64 || Align == 0)
1505 return false;
1506
1507 uint64_t Mask = ~((1ULL << Align) - 1);
1508 I.setDesc(TII.get(AArch64::ANDXri));
1509 I.getOperand(2).setImm(AArch64_AM::encodeLogicalImmediate(Mask, 64));
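    // For example, Align == 4 (a 16-byte boundary) produces
    // ANDXri %dst, %ptr, <encoding of 0xFFFFFFFFFFFFFFF0>.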
1510
1511 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1512 }
Tim Northover037af52c2016-10-31 18:31:09 +00001513 case TargetOpcode::G_PTRTOINT:
Tim Northoverfb8d9892016-10-12 22:49:15 +00001514 case TargetOpcode::G_TRUNC: {
1515 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
1516 const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
1517
1518 const unsigned DstReg = I.getOperand(0).getReg();
1519 const unsigned SrcReg = I.getOperand(1).getReg();
1520
1521 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
1522 const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
1523
1524 if (DstRB.getID() != SrcRB.getID()) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001525 LLVM_DEBUG(
1526 dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
Tim Northoverfb8d9892016-10-12 22:49:15 +00001527 return false;
1528 }
1529
1530 if (DstRB.getID() == AArch64::GPRRegBankID) {
1531 const TargetRegisterClass *DstRC =
1532 getRegClassForTypeOnBank(DstTy, DstRB, RBI);
1533 if (!DstRC)
1534 return false;
1535
1536 const TargetRegisterClass *SrcRC =
1537 getRegClassForTypeOnBank(SrcTy, SrcRB, RBI);
1538 if (!SrcRC)
1539 return false;
1540
1541 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
1542 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001543 LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
Tim Northoverfb8d9892016-10-12 22:49:15 +00001544 return false;
1545 }
1546
1547 if (DstRC == SrcRC) {
1548 // Nothing to be done
Daniel Sanderscc36dbf2017-06-27 10:11:39 +00001549 } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) &&
1550 SrcTy == LLT::scalar(64)) {
1551 llvm_unreachable("TableGen can import this case");
1552 return false;
Tim Northoverfb8d9892016-10-12 22:49:15 +00001553 } else if (DstRC == &AArch64::GPR32RegClass &&
1554 SrcRC == &AArch64::GPR64RegClass) {
1555 I.getOperand(1).setSubReg(AArch64::sub_32);
1556 } else {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001557 LLVM_DEBUG(
1558 dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
Tim Northoverfb8d9892016-10-12 22:49:15 +00001559 return false;
1560 }
1561
1562 I.setDesc(TII.get(TargetOpcode::COPY));
1563 return true;
1564 } else if (DstRB.getID() == AArch64::FPRRegBankID) {
1565 if (DstTy == LLT::vector(4, 16) && SrcTy == LLT::vector(4, 32)) {
1566 I.setDesc(TII.get(AArch64::XTNv4i16));
1567 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1568 return true;
1569 }
1570 }
1571
1572 return false;
1573 }
1574
Tim Northover3d38b3a2016-10-11 20:50:21 +00001575 case TargetOpcode::G_ANYEXT: {
1576 const unsigned DstReg = I.getOperand(0).getReg();
1577 const unsigned SrcReg = I.getOperand(1).getReg();
1578
Quentin Colombetcb629a82016-10-12 03:57:49 +00001579 const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
1580 if (RBDst.getID() != AArch64::GPRRegBankID) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001581 LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst
1582 << ", expected: GPR\n");
Quentin Colombetcb629a82016-10-12 03:57:49 +00001583 return false;
1584 }
Tim Northover3d38b3a2016-10-11 20:50:21 +00001585
Quentin Colombetcb629a82016-10-12 03:57:49 +00001586 const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
1587 if (RBSrc.getID() != AArch64::GPRRegBankID) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001588 LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc
1589 << ", expected: GPR\n");
Tim Northover3d38b3a2016-10-11 20:50:21 +00001590 return false;
1591 }
1592
1593 const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
1594
1595 if (DstSize == 0) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001596 LLVM_DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
Tim Northover3d38b3a2016-10-11 20:50:21 +00001597 return false;
1598 }
1599
Quentin Colombetcb629a82016-10-12 03:57:49 +00001600 if (DstSize != 64 && DstSize > 32) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001601 LLVM_DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
1602 << ", expected: 32 or 64\n");
Tim Northover3d38b3a2016-10-11 20:50:21 +00001603 return false;
1604 }
Quentin Colombetcb629a82016-10-12 03:57:49 +00001605 // At this point G_ANYEXT is just like a plain COPY, but we need
1606 // to explicitly form the 64-bit value if one is needed.
1607 if (DstSize > 32) {
1608 unsigned ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
1609 BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
1610 .addDef(ExtSrc)
1611 .addImm(0)
1612 .addUse(SrcReg)
1613 .addImm(AArch64::sub_32);
1614 I.getOperand(1).setReg(ExtSrc);
Tim Northover3d38b3a2016-10-11 20:50:21 +00001615 }
Quentin Colombetcb629a82016-10-12 03:57:49 +00001616 return selectCopy(I, TII, MRI, TRI, RBI);
Tim Northover3d38b3a2016-10-11 20:50:21 +00001617 }
1618
1619 case TargetOpcode::G_ZEXT:
1620 case TargetOpcode::G_SEXT: {
1621 unsigned Opcode = I.getOpcode();
1622 const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
1623 SrcTy = MRI.getType(I.getOperand(1).getReg());
1624 const bool isSigned = Opcode == TargetOpcode::G_SEXT;
1625 const unsigned DefReg = I.getOperand(0).getReg();
1626 const unsigned SrcReg = I.getOperand(1).getReg();
1627 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
1628
1629 if (RB.getID() != AArch64::GPRRegBankID) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001630 LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
1631 << ", expected: GPR\n");
Tim Northover3d38b3a2016-10-11 20:50:21 +00001632 return false;
1633 }
1634
1635 MachineInstr *ExtI;
1636 if (DstTy == LLT::scalar(64)) {
1637 // FIXME: Can we avoid manually doing this?
1638 if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001639 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
1640 << " operand\n");
Tim Northover3d38b3a2016-10-11 20:50:21 +00001641 return false;
1642 }
1643
1644 const unsigned SrcXReg =
1645 MRI.createVirtualRegister(&AArch64::GPR64RegClass);
1646 BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
1647 .addDef(SrcXReg)
1648 .addImm(0)
1649 .addUse(SrcReg)
1650 .addImm(AArch64::sub_32);
1651
1652 const unsigned NewOpc = isSigned ? AArch64::SBFMXri : AArch64::UBFMXri;
1653 ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
1654 .addDef(DefReg)
1655 .addUse(SrcXReg)
1656 .addImm(0)
1657 .addImm(SrcTy.getSizeInBits() - 1);
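      // For example, a G_SEXT from s8 to s64 widens the source with
      // SUBREG_TO_REG and then emits SBFMXri %dst, %srcX, 0, 7 (i.e. SXTB).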
Tim Northovera9105be2016-11-09 22:39:54 +00001658 } else if (DstTy.isScalar() && DstTy.getSizeInBits() <= 32) {
Tim Northover3d38b3a2016-10-11 20:50:21 +00001659 const unsigned NewOpc = isSigned ? AArch64::SBFMWri : AArch64::UBFMWri;
1660 ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
1661 .addDef(DefReg)
1662 .addUse(SrcReg)
1663 .addImm(0)
1664 .addImm(SrcTy.getSizeInBits() - 1);
1665 } else {
1666 return false;
1667 }
1668
1669 constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1670
1671 I.eraseFromParent();
1672 return true;
1673 }
Tim Northoverc1d8c2b2016-10-11 22:29:23 +00001674
Tim Northover69271c62016-10-12 22:49:11 +00001675 case TargetOpcode::G_SITOFP:
1676 case TargetOpcode::G_UITOFP:
1677 case TargetOpcode::G_FPTOSI:
1678 case TargetOpcode::G_FPTOUI: {
1679 const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
1680 SrcTy = MRI.getType(I.getOperand(1).getReg());
1681 const unsigned NewOpc = selectFPConvOpc(Opcode, DstTy, SrcTy);
1682 if (NewOpc == Opcode)
1683 return false;
1684
1685 I.setDesc(TII.get(NewOpc));
1686 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1687
1688 return true;
1689 }
1690
1691
Tim Northoverc1d8c2b2016-10-11 22:29:23 +00001692 case TargetOpcode::G_INTTOPTR:
Daniel Sandersedd07842017-08-17 09:26:14 +00001693 // The importer is currently unable to import pointer types since they
1694 // didn't exist in SelectionDAG.
Daniel Sanderseb2f5f32017-08-15 15:10:31 +00001695 return selectCopy(I, TII, MRI, TRI, RBI);
Daniel Sanders16e6dd32017-08-15 13:50:09 +00001696
Daniel Sandersedd07842017-08-17 09:26:14 +00001697 case TargetOpcode::G_BITCAST:
1698 // Imported SelectionDAG rules can handle every bitcast except those that
1699 // bitcast from a type to the same type. Ideally, these shouldn't occur
Amara Emersonb9560512019-04-11 20:32:24 +00001700 // but we might not run an optimizer that deletes them. The other exception
1701 // is bitcasts involving pointer types, as SelectionDAG has no knowledge
1702 // of them.
1703 return selectCopy(I, TII, MRI, TRI, RBI);
Daniel Sandersedd07842017-08-17 09:26:14 +00001704
Tim Northover9ac0eba2016-11-08 00:45:29 +00001705 case TargetOpcode::G_SELECT: {
1706 if (MRI.getType(I.getOperand(1).getReg()) != LLT::scalar(1)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001707 LLVM_DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
1708 << ", expected: " << LLT::scalar(1) << '\n');
Tim Northover9ac0eba2016-11-08 00:45:29 +00001709 return false;
1710 }
1711
1712 const unsigned CondReg = I.getOperand(1).getReg();
1713 const unsigned TReg = I.getOperand(2).getReg();
1714 const unsigned FReg = I.getOperand(3).getReg();
1715
1716 unsigned CSelOpc = 0;
1717
1718 if (Ty == LLT::scalar(32)) {
1719 CSelOpc = AArch64::CSELWr;
Kristof Beylse9412b42017-01-19 13:32:14 +00001720 } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
Tim Northover9ac0eba2016-11-08 00:45:29 +00001721 CSelOpc = AArch64::CSELXr;
1722 } else {
1723 return false;
1724 }
1725
1726 MachineInstr &TstMI =
1727 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
1728 .addDef(AArch64::WZR)
1729 .addUse(CondReg)
1730 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
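    // The ANDS against 1 only sets the flags (its result goes to WZR); the
    // CSEL below then picks the true value when the condition bit was
    // non-zero (NE) and the false value otherwise.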
1731
1732 MachineInstr &CSelMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CSelOpc))
1733 .addDef(I.getOperand(0).getReg())
1734 .addUse(TReg)
1735 .addUse(FReg)
1736 .addImm(AArch64CC::NE);
1737
1738 constrainSelectedInstRegOperands(TstMI, TII, TRI, RBI);
1739 constrainSelectedInstRegOperands(CSelMI, TII, TRI, RBI);
1740
1741 I.eraseFromParent();
1742 return true;
1743 }
Tim Northover6c02ad52016-10-12 22:49:04 +00001744 case TargetOpcode::G_ICMP: {
Amara Emerson9bf092d2019-04-09 21:22:43 +00001745 if (Ty.isVector())
1746 return selectVectorICmp(I, MRI);
1747
Aditya Nandakumar02c602e2017-07-31 17:00:16 +00001748 if (Ty != LLT::scalar(32)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001749 LLVM_DEBUG(dbgs() << "G_ICMP result has type: " << Ty
1750 << ", expected: " << LLT::scalar(32) << '\n');
Tim Northover6c02ad52016-10-12 22:49:04 +00001751 return false;
1752 }
1753
1754 unsigned CmpOpc = 0;
1755 unsigned ZReg = 0;
1756
1757 LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
1758 if (CmpTy == LLT::scalar(32)) {
1759 CmpOpc = AArch64::SUBSWrr;
1760 ZReg = AArch64::WZR;
1761 } else if (CmpTy == LLT::scalar(64) || CmpTy.isPointer()) {
1762 CmpOpc = AArch64::SUBSXrr;
1763 ZReg = AArch64::XZR;
1764 } else {
1765 return false;
1766 }
1767
Kristof Beyls22524402017-01-05 10:16:08 +00001768 // CSINC increments the result by one when the condition code is false.
1769 // Therefore, we have to invert the predicate to get an increment by 1 when
1770 // the predicate is true.
1771 const AArch64CC::CondCode invCC =
1772 changeICMPPredToAArch64CC(CmpInst::getInversePredicate(
1773 (CmpInst::Predicate)I.getOperand(1).getPredicate()));
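    // For example, an 'eq' compare builds SUBS wzr/xzr, %lhs, %rhs followed
    // by CSINC %dst, wzr, wzr, ne, which materializes 1 when the operands
    // were equal and 0 otherwise.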
Tim Northover6c02ad52016-10-12 22:49:04 +00001774
1775 MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
1776 .addDef(ZReg)
1777 .addUse(I.getOperand(2).getReg())
1778 .addUse(I.getOperand(3).getReg());
1779
1780 MachineInstr &CSetMI =
1781 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
1782 .addDef(I.getOperand(0).getReg())
1783 .addUse(AArch64::WZR)
1784 .addUse(AArch64::WZR)
Kristof Beyls22524402017-01-05 10:16:08 +00001785 .addImm(invCC);
Tim Northover6c02ad52016-10-12 22:49:04 +00001786
1787 constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
1788 constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);
1789
1790 I.eraseFromParent();
1791 return true;
1792 }
1793
Tim Northover7dd378d2016-10-12 22:49:07 +00001794 case TargetOpcode::G_FCMP: {
Aditya Nandakumar02c602e2017-07-31 17:00:16 +00001795 if (Ty != LLT::scalar(32)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001796 LLVM_DEBUG(dbgs() << "G_FCMP result has type: " << Ty
1797 << ", expected: " << LLT::scalar(32) << '\n');
Tim Northover7dd378d2016-10-12 22:49:07 +00001798 return false;
1799 }
1800
1801 unsigned CmpOpc = 0;
1802 LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
1803 if (CmpTy == LLT::scalar(32)) {
1804 CmpOpc = AArch64::FCMPSrr;
1805 } else if (CmpTy == LLT::scalar(64)) {
1806 CmpOpc = AArch64::FCMPDrr;
1807 } else {
1808 return false;
1809 }
1810
1811 // FIXME: regbank
1812
1813 AArch64CC::CondCode CC1, CC2;
1814 changeFCMPPredToAArch64CC(
1815 (CmpInst::Predicate)I.getOperand(1).getPredicate(), CC1, CC2);
1816
1817 MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
1818 .addUse(I.getOperand(2).getReg())
1819 .addUse(I.getOperand(3).getReg());
1820
1821 const unsigned DefReg = I.getOperand(0).getReg();
1822 unsigned Def1Reg = DefReg;
1823 if (CC2 != AArch64CC::AL)
1824 Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
1825
1826 MachineInstr &CSetMI =
1827 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
1828 .addDef(Def1Reg)
1829 .addUse(AArch64::WZR)
1830 .addUse(AArch64::WZR)
Tim Northover33a1a0b2017-01-17 23:04:01 +00001831 .addImm(getInvertedCondCode(CC1));
Tim Northover7dd378d2016-10-12 22:49:07 +00001832
1833 if (CC2 != AArch64CC::AL) {
1834 unsigned Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
1835 MachineInstr &CSet2MI =
1836 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
1837 .addDef(Def2Reg)
1838 .addUse(AArch64::WZR)
1839 .addUse(AArch64::WZR)
Tim Northover33a1a0b2017-01-17 23:04:01 +00001840 .addImm(getInvertedCondCode(CC2));
Tim Northover7dd378d2016-10-12 22:49:07 +00001841 MachineInstr &OrMI =
1842 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ORRWrr))
1843 .addDef(DefReg)
1844 .addUse(Def1Reg)
1845 .addUse(Def2Reg);
1846 constrainSelectedInstRegOperands(OrMI, TII, TRI, RBI);
1847 constrainSelectedInstRegOperands(CSet2MI, TII, TRI, RBI);
1848 }
1849
1850 constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
1851 constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);
1852
1853 I.eraseFromParent();
1854 return true;
1855 }
Tim Northovere9600d82017-02-08 17:57:27 +00001856 case TargetOpcode::G_VASTART:
1857 return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI)
1858 : selectVaStartAAPCS(I, MF, MRI);
Jessica Paquette7f6fe7c2019-04-29 20:58:17 +00001859 case TargetOpcode::G_INTRINSIC:
1860 return selectIntrinsic(I, MRI);
Amara Emerson1f5d9942018-04-25 14:43:59 +00001861 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
Jessica Paquette22c62152019-04-02 19:57:26 +00001862 return selectIntrinsicWithSideEffects(I, MRI);
Amara Emerson1e8c1642018-07-31 00:09:02 +00001863 case TargetOpcode::G_IMPLICIT_DEF: {
Justin Bogner4fc69662017-07-12 17:32:32 +00001864 I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
Amara Emerson58aea522018-02-02 01:44:43 +00001865 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
1866 const unsigned DstReg = I.getOperand(0).getReg();
1867 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
1868 const TargetRegisterClass *DstRC =
1869 getRegClassForTypeOnBank(DstTy, DstRB, RBI);
1870 RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
Justin Bogner4fc69662017-07-12 17:32:32 +00001871 return true;
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001872 }
Amara Emerson1e8c1642018-07-31 00:09:02 +00001873 case TargetOpcode::G_BLOCK_ADDR: {
1874 if (TM.getCodeModel() == CodeModel::Large) {
1875 materializeLargeCMVal(I, I.getOperand(1).getBlockAddress(), 0);
1876 I.eraseFromParent();
1877 return true;
1878 } else {
1879 I.setDesc(TII.get(AArch64::MOVaddrBA));
1880 auto MovMI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::MOVaddrBA),
1881 I.getOperand(0).getReg())
1882 .addBlockAddress(I.getOperand(1).getBlockAddress(),
1883 /* Offset */ 0, AArch64II::MO_PAGE)
1884 .addBlockAddress(
1885 I.getOperand(1).getBlockAddress(), /* Offset */ 0,
1886 AArch64II::MO_NC | AArch64II::MO_PAGEOFF);
1887 I.eraseFromParent();
1888 return constrainSelectedInstRegOperands(*MovMI, TII, TRI, RBI);
1889 }
1890 }
Jessica Paquette991cb392019-04-23 20:46:19 +00001891 case TargetOpcode::G_INTRINSIC_TRUNC:
1892 return selectIntrinsicTrunc(I, MRI);
Jessica Paquette4fe75742019-04-23 23:03:03 +00001893 case TargetOpcode::G_INTRINSIC_ROUND:
1894 return selectIntrinsicRound(I, MRI);
Amara Emerson5ec14602018-12-10 18:44:58 +00001895 case TargetOpcode::G_BUILD_VECTOR:
1896 return selectBuildVector(I, MRI);
Amara Emerson8cb186c2018-12-20 01:11:04 +00001897 case TargetOpcode::G_MERGE_VALUES:
1898 return selectMergeValues(I, MRI);
Jessica Paquette245047d2019-01-24 22:00:41 +00001899 case TargetOpcode::G_UNMERGE_VALUES:
1900 return selectUnmergeValues(I, MRI);
Amara Emerson1abe05c2019-02-21 20:20:16 +00001901 case TargetOpcode::G_SHUFFLE_VECTOR:
1902 return selectShuffleVector(I, MRI);
Jessica Paquette607774c2019-03-11 22:18:01 +00001903 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
1904 return selectExtractElt(I, MRI);
Jessica Paquette5aff1f42019-03-14 18:01:30 +00001905 case TargetOpcode::G_INSERT_VECTOR_ELT:
1906 return selectInsertElt(I, MRI);
Amara Emerson2ff22982019-03-14 22:48:15 +00001907 case TargetOpcode::G_CONCAT_VECTORS:
1908 return selectConcatVectors(I, MRI);
Amara Emerson1e8c1642018-07-31 00:09:02 +00001909 }
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001910
1911 return false;
1912}
Daniel Sanders8a4bae92017-03-14 21:32:08 +00001913
Jessica Paquette991cb392019-04-23 20:46:19 +00001914bool AArch64InstructionSelector::selectIntrinsicTrunc(
1915 MachineInstr &I, MachineRegisterInfo &MRI) const {
1916 const LLT SrcTy = MRI.getType(I.getOperand(0).getReg());
1917
1918 // Select the correct opcode.
1919 unsigned Opc = 0;
1920 if (!SrcTy.isVector()) {
1921 switch (SrcTy.getSizeInBits()) {
1922 default:
1923 case 16:
1924 Opc = AArch64::FRINTZHr;
1925 break;
1926 case 32:
1927 Opc = AArch64::FRINTZSr;
1928 break;
1929 case 64:
1930 Opc = AArch64::FRINTZDr;
1931 break;
1932 }
1933 } else {
1934 unsigned NumElts = SrcTy.getNumElements();
1935 switch (SrcTy.getElementType().getSizeInBits()) {
1936 default:
1937 break;
1938 case 16:
1939 if (NumElts == 4)
1940 Opc = AArch64::FRINTZv4f16;
1941 else if (NumElts == 8)
1942 Opc = AArch64::FRINTZv8f16;
1943 break;
1944 case 32:
1945 if (NumElts == 2)
1946 Opc = AArch64::FRINTZv2f32;
1947 else if (NumElts == 4)
1948 Opc = AArch64::FRINTZv4f32;
1949 break;
1950 case 64:
1951 if (NumElts == 2)
1952 Opc = AArch64::FRINTZv2f64;
1953 break;
1954 }
1955 }
1956
1957 if (!Opc) {
1958 // Didn't get an opcode above, bail.
1959 LLVM_DEBUG(dbgs() << "Unsupported type for G_INTRINSIC_TRUNC!\n");
1960 return false;
1961 }
1962
1963 // Legalization would have set us up perfectly for this; we just need to
1964 // set the opcode and move on.
1965 I.setDesc(TII.get(Opc));
1966 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1967}
1968
Jessica Paquette4fe75742019-04-23 23:03:03 +00001969bool AArch64InstructionSelector::selectIntrinsicRound(
1970 MachineInstr &I, MachineRegisterInfo &MRI) const {
1971 const LLT SrcTy = MRI.getType(I.getOperand(0).getReg());
1972
1973 // Select the correct opcode.
1974 unsigned Opc = 0;
1975 if (!SrcTy.isVector()) {
1976 switch (SrcTy.getSizeInBits()) {
1977 default:
1978 case 16:
1979 Opc = AArch64::FRINTAHr;
1980 break;
1981 case 32:
1982 Opc = AArch64::FRINTASr;
1983 break;
1984 case 64:
1985 Opc = AArch64::FRINTADr;
1986 break;
1987 }
1988 } else {
1989 unsigned NumElts = SrcTy.getNumElements();
1990 switch (SrcTy.getElementType().getSizeInBits()) {
1991 default:
1992 break;
1993 case 16:
1994 if (NumElts == 4)
1995 Opc = AArch64::FRINTAv4f16;
1996 else if (NumElts == 8)
1997 Opc = AArch64::FRINTAv8f16;
1998 break;
1999 case 32:
2000 if (NumElts == 2)
2001 Opc = AArch64::FRINTAv2f32;
2002 else if (NumElts == 4)
2003 Opc = AArch64::FRINTAv4f32;
2004 break;
2005 case 64:
2006 if (NumElts == 2)
2007 Opc = AArch64::FRINTAv2f64;
2008 break;
2009 }
2010 }
2011
2012 if (!Opc) {
2013 // Didn't get an opcode above, bail.
2014 LLVM_DEBUG(dbgs() << "Unsupported type for G_INTRINSIC_ROUND!\n");
2015 return false;
2016 }
2017
2018 // Legalization would have set us up perfectly for this; we just need to
2019 // set the opcode and move on.
2020 I.setDesc(TII.get(Opc));
2021 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2022}
2023
Amara Emerson9bf092d2019-04-09 21:22:43 +00002024bool AArch64InstructionSelector::selectVectorICmp(
2025 MachineInstr &I, MachineRegisterInfo &MRI) const {
2026 unsigned DstReg = I.getOperand(0).getReg();
2027 LLT DstTy = MRI.getType(DstReg);
2028 unsigned SrcReg = I.getOperand(2).getReg();
2029 unsigned Src2Reg = I.getOperand(3).getReg();
2030 LLT SrcTy = MRI.getType(SrcReg);
2031
2032 unsigned SrcEltSize = SrcTy.getElementType().getSizeInBits();
2033 unsigned NumElts = DstTy.getNumElements();
2034
2035 // First index is element size, 0 == 8b, 1 == 16b, 2 == 32b, 3 == 64b
2036 // Second index is num elts, 0 == v2, 1 == v4, 2 == v8, 3 == v16
2037 // Third index is cc opcode:
2038 // 0 == eq
2039 // 1 == ugt
2040 // 2 == uge
2041 // 3 == ult
2042 // 4 == ule
2043 // 5 == sgt
2044 // 6 == sge
2045 // 7 == slt
2046 // 8 == sle
2047 // ne is done by negating 'eq' result.
2048
2049 // This table below assumes that for some comparisons the operands will be
2050 // commuted.
2051 // ult op == commute + ugt op
2052 // ule op == commute + uge op
2053 // slt op == commute + sgt op
2054 // sle op == commute + sge op
2055 unsigned PredIdx = 0;
2056 bool SwapOperands = false;
2057 CmpInst::Predicate Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
2058 switch (Pred) {
2059 case CmpInst::ICMP_NE:
2060 case CmpInst::ICMP_EQ:
2061 PredIdx = 0;
2062 break;
2063 case CmpInst::ICMP_UGT:
2064 PredIdx = 1;
2065 break;
2066 case CmpInst::ICMP_UGE:
2067 PredIdx = 2;
2068 break;
2069 case CmpInst::ICMP_ULT:
2070 PredIdx = 3;
2071 SwapOperands = true;
2072 break;
2073 case CmpInst::ICMP_ULE:
2074 PredIdx = 4;
2075 SwapOperands = true;
2076 break;
2077 case CmpInst::ICMP_SGT:
2078 PredIdx = 5;
2079 break;
2080 case CmpInst::ICMP_SGE:
2081 PredIdx = 6;
2082 break;
2083 case CmpInst::ICMP_SLT:
2084 PredIdx = 7;
2085 SwapOperands = true;
2086 break;
2087 case CmpInst::ICMP_SLE:
2088 PredIdx = 8;
2089 SwapOperands = true;
2090 break;
2091 default:
2092 llvm_unreachable("Unhandled icmp predicate");
2093 return false;
2094 }
2095
2096 // This table obviously should be tablegen'd when we have our GISel native
2097 // tablegen selector.
2098
2099 static const unsigned OpcTable[4][4][9] = {
2100 {
2101 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2102 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2103 0 /* invalid */},
2104 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2105 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2106 0 /* invalid */},
2107 {AArch64::CMEQv8i8, AArch64::CMHIv8i8, AArch64::CMHSv8i8,
2108 AArch64::CMHIv8i8, AArch64::CMHSv8i8, AArch64::CMGTv8i8,
2109 AArch64::CMGEv8i8, AArch64::CMGTv8i8, AArch64::CMGEv8i8},
2110 {AArch64::CMEQv16i8, AArch64::CMHIv16i8, AArch64::CMHSv16i8,
2111 AArch64::CMHIv16i8, AArch64::CMHSv16i8, AArch64::CMGTv16i8,
2112 AArch64::CMGEv16i8, AArch64::CMGTv16i8, AArch64::CMGEv16i8}
2113 },
2114 {
2115 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2116 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2117 0 /* invalid */},
2118 {AArch64::CMEQv4i16, AArch64::CMHIv4i16, AArch64::CMHSv4i16,
2119 AArch64::CMHIv4i16, AArch64::CMHSv4i16, AArch64::CMGTv4i16,
2120 AArch64::CMGEv4i16, AArch64::CMGTv4i16, AArch64::CMGEv4i16},
2121 {AArch64::CMEQv8i16, AArch64::CMHIv8i16, AArch64::CMHSv8i16,
2122 AArch64::CMHIv8i16, AArch64::CMHSv8i16, AArch64::CMGTv8i16,
2123 AArch64::CMGEv8i16, AArch64::CMGTv8i16, AArch64::CMGEv8i16},
2124 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2125 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2126 0 /* invalid */}
2127 },
2128 {
2129 {AArch64::CMEQv2i32, AArch64::CMHIv2i32, AArch64::CMHSv2i32,
2130 AArch64::CMHIv2i32, AArch64::CMHSv2i32, AArch64::CMGTv2i32,
2131 AArch64::CMGEv2i32, AArch64::CMGTv2i32, AArch64::CMGEv2i32},
2132 {AArch64::CMEQv4i32, AArch64::CMHIv4i32, AArch64::CMHSv4i32,
2133 AArch64::CMHIv4i32, AArch64::CMHSv4i32, AArch64::CMGTv4i32,
2134 AArch64::CMGEv4i32, AArch64::CMGTv4i32, AArch64::CMGEv4i32},
2135 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2136 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2137 0 /* invalid */},
2138 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2139 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2140 0 /* invalid */}
2141 },
2142 {
2143 {AArch64::CMEQv2i64, AArch64::CMHIv2i64, AArch64::CMHSv2i64,
2144 AArch64::CMHIv2i64, AArch64::CMHSv2i64, AArch64::CMGTv2i64,
2145 AArch64::CMGEv2i64, AArch64::CMGTv2i64, AArch64::CMGEv2i64},
2146 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2147 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2148 0 /* invalid */},
2149 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2150 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2151 0 /* invalid */},
2152 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2153 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2154 0 /* invalid */}
2155 },
2156 };
2157 unsigned EltIdx = Log2_32(SrcEltSize / 8);
2158 unsigned NumEltsIdx = Log2_32(NumElts / 2);
2159 unsigned Opc = OpcTable[EltIdx][NumEltsIdx][PredIdx];
2160 if (!Opc) {
2161 LLVM_DEBUG(dbgs() << "Could not map G_ICMP to cmp opcode");
2162 return false;
2163 }
2164
2165 const RegisterBank &VecRB = *RBI.getRegBank(SrcReg, MRI, TRI);
2166 const TargetRegisterClass *SrcRC =
2167 getRegClassForTypeOnBank(SrcTy, VecRB, RBI, true);
2168 if (!SrcRC) {
2169 LLVM_DEBUG(dbgs() << "Could not determine source register class.\n");
2170 return false;
2171 }
2172
2173 unsigned NotOpc = Pred == ICmpInst::ICMP_NE ? AArch64::NOTv8i8 : 0;
2174 if (SrcTy.getSizeInBits() == 128)
2175 NotOpc = NotOpc ? AArch64::NOTv16i8 : 0;
2176
2177 if (SwapOperands)
2178 std::swap(SrcReg, Src2Reg);
2179
2180 MachineIRBuilder MIB(I);
2181 auto Cmp = MIB.buildInstr(Opc, {SrcRC}, {SrcReg, Src2Reg});
2182 constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI);
2183
2184 // Invert if we had a 'ne' cc.
2185 if (NotOpc) {
2186 Cmp = MIB.buildInstr(NotOpc, {DstReg}, {Cmp});
2187 constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI);
2188 } else {
2189 MIB.buildCopy(DstReg, Cmp.getReg(0));
2190 }
2191 RBI.constrainGenericRegister(DstReg, *SrcRC, MRI);
2192 I.eraseFromParent();
2193 return true;
2194}
2195
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00002196MachineInstr *AArch64InstructionSelector::emitScalarToVector(
Amara Emerson8acb0d92019-03-04 19:16:00 +00002197 unsigned EltSize, const TargetRegisterClass *DstRC, unsigned Scalar,
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00002198 MachineIRBuilder &MIRBuilder) const {
2199 auto Undef = MIRBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF, {DstRC}, {});
Amara Emerson5ec14602018-12-10 18:44:58 +00002200
2201 auto BuildFn = [&](unsigned SubregIndex) {
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00002202 auto Ins =
2203 MIRBuilder
2204 .buildInstr(TargetOpcode::INSERT_SUBREG, {DstRC}, {Undef, Scalar})
2205 .addImm(SubregIndex);
2206 constrainSelectedInstRegOperands(*Undef, TII, TRI, RBI);
2207 constrainSelectedInstRegOperands(*Ins, TII, TRI, RBI);
2208 return &*Ins;
Amara Emerson5ec14602018-12-10 18:44:58 +00002209 };
2210
Amara Emerson8acb0d92019-03-04 19:16:00 +00002211 switch (EltSize) {
Jessica Paquette245047d2019-01-24 22:00:41 +00002212 case 16:
2213 return BuildFn(AArch64::hsub);
Amara Emerson5ec14602018-12-10 18:44:58 +00002214 case 32:
2215 return BuildFn(AArch64::ssub);
2216 case 64:
2217 return BuildFn(AArch64::dsub);
2218 default:
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00002219 return nullptr;
Amara Emerson5ec14602018-12-10 18:44:58 +00002220 }
2221}
2222
Amara Emerson8cb186c2018-12-20 01:11:04 +00002223bool AArch64InstructionSelector::selectMergeValues(
2224 MachineInstr &I, MachineRegisterInfo &MRI) const {
2225 assert(I.getOpcode() == TargetOpcode::G_MERGE_VALUES && "unexpected opcode");
2226 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
2227 const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
2228 assert(!DstTy.isVector() && !SrcTy.isVector() && "invalid merge operation");
2229
2230 // At the moment we only support merging two s32s into an s64.
2231 if (I.getNumOperands() != 3)
2232 return false;
2233 if (DstTy.getSizeInBits() != 64 || SrcTy.getSizeInBits() != 32)
2234 return false;
2235 const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI);
2236 if (RB.getID() != AArch64::GPRRegBankID)
2237 return false;
2238
2239 auto *DstRC = &AArch64::GPR64RegClass;
2240 unsigned SubToRegDef = MRI.createVirtualRegister(DstRC);
2241 MachineInstr &SubRegMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
2242 TII.get(TargetOpcode::SUBREG_TO_REG))
2243 .addDef(SubToRegDef)
2244 .addImm(0)
2245 .addUse(I.getOperand(1).getReg())
2246 .addImm(AArch64::sub_32);
2247 unsigned SubToRegDef2 = MRI.createVirtualRegister(DstRC);
2248 // Need to anyext the second scalar before we can use bfm
2249 MachineInstr &SubRegMI2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
2250 TII.get(TargetOpcode::SUBREG_TO_REG))
2251 .addDef(SubToRegDef2)
2252 .addImm(0)
2253 .addUse(I.getOperand(2).getReg())
2254 .addImm(AArch64::sub_32);
Amara Emerson8cb186c2018-12-20 01:11:04 +00002255 MachineInstr &BFM =
2256 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::BFMXri))
Amara Emerson321bfb22018-12-20 03:27:42 +00002257 .addDef(I.getOperand(0).getReg())
Amara Emerson8cb186c2018-12-20 01:11:04 +00002258 .addUse(SubToRegDef)
2259 .addUse(SubToRegDef2)
2260 .addImm(32)
2261 .addImm(31);
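  // With immr = 32 and imms = 31 this BFM acts as BFI %dst, %hi, #32, #32:
  // the first source stays in bits [31:0] and the second source's low 32
  // bits are inserted into bits [63:32], completing the merge.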
2262 constrainSelectedInstRegOperands(SubRegMI, TII, TRI, RBI);
2263 constrainSelectedInstRegOperands(SubRegMI2, TII, TRI, RBI);
2264 constrainSelectedInstRegOperands(BFM, TII, TRI, RBI);
2265 I.eraseFromParent();
2266 return true;
2267}
2268
Jessica Paquette607774c2019-03-11 22:18:01 +00002269static bool getLaneCopyOpcode(unsigned &CopyOpc, unsigned &ExtractSubReg,
2270 const unsigned EltSize) {
2271 // Choose a lane copy opcode and subregister based off of the size of the
2272 // vector's elements.
2273 switch (EltSize) {
2274 case 16:
2275 CopyOpc = AArch64::CPYi16;
2276 ExtractSubReg = AArch64::hsub;
2277 break;
2278 case 32:
2279 CopyOpc = AArch64::CPYi32;
2280 ExtractSubReg = AArch64::ssub;
2281 break;
2282 case 64:
2283 CopyOpc = AArch64::CPYi64;
2284 ExtractSubReg = AArch64::dsub;
2285 break;
2286 default:
2287 // Unknown size, bail out.
2288 LLVM_DEBUG(dbgs() << "Elt size '" << EltSize << "' unsupported.\n");
2289 return false;
2290 }
2291 return true;
2292}
2293
Amara Emersond61b89b2019-03-14 22:48:18 +00002294MachineInstr *AArch64InstructionSelector::emitExtractVectorElt(
2295 Optional<unsigned> DstReg, const RegisterBank &DstRB, LLT ScalarTy,
2296 unsigned VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const {
2297 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
2298 unsigned CopyOpc = 0;
2299 unsigned ExtractSubReg = 0;
2300 if (!getLaneCopyOpcode(CopyOpc, ExtractSubReg, ScalarTy.getSizeInBits())) {
2301 LLVM_DEBUG(
2302 dbgs() << "Couldn't determine lane copy opcode for instruction.\n");
2303 return nullptr;
2304 }
2305
2306 const TargetRegisterClass *DstRC =
2307 getRegClassForTypeOnBank(ScalarTy, DstRB, RBI, true);
2308 if (!DstRC) {
2309 LLVM_DEBUG(dbgs() << "Could not determine destination register class.\n");
2310 return nullptr;
2311 }
2312
2313 const RegisterBank &VecRB = *RBI.getRegBank(VecReg, MRI, TRI);
2314 const LLT &VecTy = MRI.getType(VecReg);
2315 const TargetRegisterClass *VecRC =
2316 getRegClassForTypeOnBank(VecTy, VecRB, RBI, true);
2317 if (!VecRC) {
2318 LLVM_DEBUG(dbgs() << "Could not determine source register class.\n");
2319 return nullptr;
2320 }
2321
2322 // The register that we're going to copy into.
2323 unsigned InsertReg = VecReg;
2324 if (!DstReg)
2325 DstReg = MRI.createVirtualRegister(DstRC);
2326 // If the lane index is 0, we just use a subregister COPY.
2327 if (LaneIdx == 0) {
Amara Emerson86271782019-03-18 19:20:10 +00002328 auto Copy = MIRBuilder.buildInstr(TargetOpcode::COPY, {*DstReg}, {})
2329 .addReg(VecReg, 0, ExtractSubReg);
Amara Emersond61b89b2019-03-14 22:48:18 +00002330 RBI.constrainGenericRegister(*DstReg, *DstRC, MRI);
Amara Emerson3739a202019-03-15 21:59:50 +00002331 return &*Copy;
Amara Emersond61b89b2019-03-14 22:48:18 +00002332 }
2333
2334 // Lane copies require 128-bit wide registers. If we're dealing with an
2335 // unpacked vector, then we need to move up to that width. Insert an implicit
2336 // def and a subregister insert to get us there.
2337 if (VecTy.getSizeInBits() != 128) {
2338 MachineInstr *ScalarToVector = emitScalarToVector(
2339 VecTy.getSizeInBits(), &AArch64::FPR128RegClass, VecReg, MIRBuilder);
2340 if (!ScalarToVector)
2341 return nullptr;
2342 InsertReg = ScalarToVector->getOperand(0).getReg();
2343 }
2344
2345 MachineInstr *LaneCopyMI =
2346 MIRBuilder.buildInstr(CopyOpc, {*DstReg}, {InsertReg}).addImm(LaneIdx);
2347 constrainSelectedInstRegOperands(*LaneCopyMI, TII, TRI, RBI);
2348
2349 // Make sure that we actually constrain the initial copy.
2350 RBI.constrainGenericRegister(*DstReg, *DstRC, MRI);
2351 return LaneCopyMI;
2352}
2353
Jessica Paquette607774c2019-03-11 22:18:01 +00002354bool AArch64InstructionSelector::selectExtractElt(
2355 MachineInstr &I, MachineRegisterInfo &MRI) const {
2356 assert(I.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT &&
2357 "unexpected opcode!");
2358 unsigned DstReg = I.getOperand(0).getReg();
2359 const LLT NarrowTy = MRI.getType(DstReg);
2360 const unsigned SrcReg = I.getOperand(1).getReg();
2361 const LLT WideTy = MRI.getType(SrcReg);
Amara Emersond61b89b2019-03-14 22:48:18 +00002362 (void)WideTy;
Jessica Paquette607774c2019-03-11 22:18:01 +00002363 assert(WideTy.getSizeInBits() >= NarrowTy.getSizeInBits() &&
2364 "source register size too small!");
2365 assert(NarrowTy.isScalar() && "cannot extract vector into vector!");
2366
2367 // Need the lane index to determine the correct copy opcode.
2368 MachineOperand &LaneIdxOp = I.getOperand(2);
2369 assert(LaneIdxOp.isReg() && "Lane index operand was not a register?");
2370
2371 if (RBI.getRegBank(DstReg, MRI, TRI)->getID() != AArch64::FPRRegBankID) {
2372 LLVM_DEBUG(dbgs() << "Cannot extract into GPR.\n");
2373 return false;
2374 }
2375
Jessica Paquettebb1aced2019-03-13 21:19:29 +00002376 // Find the index to extract from.
Jessica Paquette76f64b62019-04-26 21:53:13 +00002377 auto VRegAndVal = getConstantVRegValWithLookThrough(LaneIdxOp.getReg(), MRI);
2378 if (!VRegAndVal)
Jessica Paquette607774c2019-03-11 22:18:01 +00002379 return false;
Jessica Paquette76f64b62019-04-26 21:53:13 +00002380 unsigned LaneIdx = VRegAndVal->Value;
Jessica Paquette607774c2019-03-11 22:18:01 +00002381
Jessica Paquette607774c2019-03-11 22:18:01 +00002382 MachineIRBuilder MIRBuilder(I);
2383
Amara Emersond61b89b2019-03-14 22:48:18 +00002384 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
2385 MachineInstr *Extract = emitExtractVectorElt(DstReg, DstRB, NarrowTy, SrcReg,
2386 LaneIdx, MIRBuilder);
2387 if (!Extract)
2388 return false;
2389
2390 I.eraseFromParent();
2391 return true;
2392}
2393
2394bool AArch64InstructionSelector::selectSplitVectorUnmerge(
2395 MachineInstr &I, MachineRegisterInfo &MRI) const {
2396 unsigned NumElts = I.getNumOperands() - 1;
2397 unsigned SrcReg = I.getOperand(NumElts).getReg();
2398 const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
2399 const LLT SrcTy = MRI.getType(SrcReg);
2400
2401 assert(NarrowTy.isVector() && "Expected an unmerge into vectors");
2402 if (SrcTy.getSizeInBits() > 128) {
2403 LLVM_DEBUG(dbgs() << "Unexpected vector type for vec split unmerge");
2404 return false;
Jessica Paquette607774c2019-03-11 22:18:01 +00002405 }
2406
Amara Emersond61b89b2019-03-14 22:48:18 +00002407 MachineIRBuilder MIB(I);
2408
2409 // We implement a split vector operation by treating the sub-vectors as
2410 // scalars and extracting them.
2411 const RegisterBank &DstRB =
2412 *RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI);
2413 for (unsigned OpIdx = 0; OpIdx < NumElts; ++OpIdx) {
2414 unsigned Dst = I.getOperand(OpIdx).getReg();
2415 MachineInstr *Extract =
2416 emitExtractVectorElt(Dst, DstRB, NarrowTy, SrcReg, OpIdx, MIB);
2417 if (!Extract)
Jessica Paquette607774c2019-03-11 22:18:01 +00002418 return false;
Jessica Paquette607774c2019-03-11 22:18:01 +00002419 }
Jessica Paquette607774c2019-03-11 22:18:01 +00002420 I.eraseFromParent();
2421 return true;
2422}
2423
Jessica Paquette245047d2019-01-24 22:00:41 +00002424bool AArch64InstructionSelector::selectUnmergeValues(
2425 MachineInstr &I, MachineRegisterInfo &MRI) const {
2426 assert(I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2427 "unexpected opcode");
2428
2429 // TODO: Handle unmerging into GPRs and from scalars to scalars.
2430 if (RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI)->getID() !=
2431 AArch64::FPRRegBankID ||
2432 RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI)->getID() !=
2433 AArch64::FPRRegBankID) {
2434 LLVM_DEBUG(dbgs() << "Unmerging vector-to-gpr and scalar-to-scalar "
2435 "currently unsupported.\n");
2436 return false;
2437 }
2438
2439 // The last operand is the vector source register, and every other operand is
2440 // a register to unpack into.
2441 unsigned NumElts = I.getNumOperands() - 1;
2442 unsigned SrcReg = I.getOperand(NumElts).getReg();
2443 const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
2444 const LLT WideTy = MRI.getType(SrcReg);
Benjamin Kramer653020d2019-01-24 23:45:07 +00002445 (void)WideTy;
Jessica Paquette245047d2019-01-24 22:00:41 +00002446 assert(WideTy.isVector() && "can only unmerge from vector types!");
2447 assert(WideTy.getSizeInBits() > NarrowTy.getSizeInBits() &&
2448 "source register size too small!");
2449
Amara Emersond61b89b2019-03-14 22:48:18 +00002450 if (!NarrowTy.isScalar())
2451 return selectSplitVectorUnmerge(I, MRI);
Jessica Paquette245047d2019-01-24 22:00:41 +00002452
Amara Emerson3739a202019-03-15 21:59:50 +00002453 MachineIRBuilder MIB(I);
2454
Jessica Paquette245047d2019-01-24 22:00:41 +00002455 // Choose a lane copy opcode and subregister based off of the size of the
2456 // vector's elements.
2457 unsigned CopyOpc = 0;
2458 unsigned ExtractSubReg = 0;
Jessica Paquette607774c2019-03-11 22:18:01 +00002459 if (!getLaneCopyOpcode(CopyOpc, ExtractSubReg, NarrowTy.getSizeInBits()))
Jessica Paquette245047d2019-01-24 22:00:41 +00002460 return false;
Jessica Paquette245047d2019-01-24 22:00:41 +00002461
2462 // Set up for the lane copies.
2463 MachineBasicBlock &MBB = *I.getParent();
2464
2465 // Stores the registers we'll be copying from.
2466 SmallVector<unsigned, 4> InsertRegs;
2467
2468 // We'll use the first register twice, so we only need NumElts-1 registers.
2469 unsigned NumInsertRegs = NumElts - 1;
2470
2471 // If our elements fit into exactly 128 bits, then we can copy from the source
2472 // directly. Otherwise, we need to do a bit of setup with some subregister
2473 // inserts.
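  // For example, unmerging a v2s64 copies lanes straight out of the 128-bit
  // source, while unmerging a 64-bit v2s32 first widens the source via
  // IMPLICIT_DEF + INSERT_SUBREG (dsub) so the lane copies have a 128-bit
  // register to read from.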
2474 if (NarrowTy.getSizeInBits() * NumElts == 128) {
2475 InsertRegs = SmallVector<unsigned, 4>(NumInsertRegs, SrcReg);
2476 } else {
2477 // No. We have to perform subregister inserts. For each insert, create an
2478 // implicit def and a subregister insert, and save the register we create.
2479 for (unsigned Idx = 0; Idx < NumInsertRegs; ++Idx) {
2480 unsigned ImpDefReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
2481 MachineInstr &ImpDefMI =
2482 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(TargetOpcode::IMPLICIT_DEF),
2483 ImpDefReg);
2484
2485 // Now, create the subregister insert from SrcReg.
2486 unsigned InsertReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
2487 MachineInstr &InsMI =
2488 *BuildMI(MBB, I, I.getDebugLoc(),
2489 TII.get(TargetOpcode::INSERT_SUBREG), InsertReg)
2490 .addUse(ImpDefReg)
2491 .addUse(SrcReg)
2492 .addImm(AArch64::dsub);
2493
2494 constrainSelectedInstRegOperands(ImpDefMI, TII, TRI, RBI);
2495 constrainSelectedInstRegOperands(InsMI, TII, TRI, RBI);
2496
2497 // Save the register so that we can copy from it after.
2498 InsertRegs.push_back(InsertReg);
2499 }
2500 }
2501
2502 // Now that we've created any necessary subregister inserts, we can
2503 // create the copies.
2504 //
2505 // Perform the first copy separately as a subregister copy.
2506 unsigned CopyTo = I.getOperand(0).getReg();
Amara Emerson86271782019-03-18 19:20:10 +00002507 auto FirstCopy = MIB.buildInstr(TargetOpcode::COPY, {CopyTo}, {})
2508 .addReg(InsertRegs[0], 0, ExtractSubReg);
Amara Emerson3739a202019-03-15 21:59:50 +00002509 constrainSelectedInstRegOperands(*FirstCopy, TII, TRI, RBI);
Jessica Paquette245047d2019-01-24 22:00:41 +00002510
2511 // Now, perform the remaining copies as vector lane copies.
2512 unsigned LaneIdx = 1;
2513 for (unsigned InsReg : InsertRegs) {
2514 unsigned CopyTo = I.getOperand(LaneIdx).getReg();
2515 MachineInstr &CopyInst =
2516 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CopyOpc), CopyTo)
2517 .addUse(InsReg)
2518 .addImm(LaneIdx);
2519 constrainSelectedInstRegOperands(CopyInst, TII, TRI, RBI);
2520 ++LaneIdx;
2521 }
2522
2523 // Separately constrain the first copy's destination. Because of the
2524 // limitation in constrainOperandRegClass, we can't guarantee that this will
2525 // actually be constrained. So, do it ourselves using the second operand.
2526 const TargetRegisterClass *RC =
2527 MRI.getRegClassOrNull(I.getOperand(1).getReg());
2528 if (!RC) {
2529 LLVM_DEBUG(dbgs() << "Couldn't constrain copy destination.\n");
2530 return false;
2531 }
2532
2533 RBI.constrainGenericRegister(CopyTo, *RC, MRI);
2534 I.eraseFromParent();
2535 return true;
2536}
2537
Amara Emerson2ff22982019-03-14 22:48:15 +00002538bool AArch64InstructionSelector::selectConcatVectors(
2539 MachineInstr &I, MachineRegisterInfo &MRI) const {
2540 assert(I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
2541 "Unexpected opcode");
2542 unsigned Dst = I.getOperand(0).getReg();
2543 unsigned Op1 = I.getOperand(1).getReg();
2544 unsigned Op2 = I.getOperand(2).getReg();
2545 MachineIRBuilder MIRBuilder(I);
2546 MachineInstr *ConcatMI = emitVectorConcat(Dst, Op1, Op2, MIRBuilder);
2547 if (!ConcatMI)
2548 return false;
2549 I.eraseFromParent();
2550 return true;
2551}
2552
Amara Emerson1abe05c2019-02-21 20:20:16 +00002553void AArch64InstructionSelector::collectShuffleMaskIndices(
2554 MachineInstr &I, MachineRegisterInfo &MRI,
Amara Emerson2806fd02019-04-12 21:31:21 +00002555 SmallVectorImpl<Optional<int>> &Idxs) const {
Amara Emerson1abe05c2019-02-21 20:20:16 +00002556 MachineInstr *MaskDef = MRI.getVRegDef(I.getOperand(3).getReg());
2557 assert(
2558 MaskDef->getOpcode() == TargetOpcode::G_BUILD_VECTOR &&
2559 "G_SHUFFLE_VECTOR should have a constant mask operand as G_BUILD_VECTOR");
2560 // Find the constant indices.
2561 for (unsigned i = 1, e = MaskDef->getNumOperands(); i < e; ++i) {
2562 MachineInstr *ScalarDef = MRI.getVRegDef(MaskDef->getOperand(i).getReg());
2563 assert(ScalarDef && "Could not find vreg def of shufflevec index op");
2564 // Look through copies.
2565 while (ScalarDef->getOpcode() == TargetOpcode::COPY) {
2566 ScalarDef = MRI.getVRegDef(ScalarDef->getOperand(1).getReg());
2567 assert(ScalarDef && "Could not find def of copy operand");
2568 }
Amara Emerson2806fd02019-04-12 21:31:21 +00002569 if (ScalarDef->getOpcode() != TargetOpcode::G_CONSTANT) {
2570 // This must be an undef if it's not a constant.
2571 assert(ScalarDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
2572 Idxs.push_back(None);
2573 } else {
2574 Idxs.push_back(ScalarDef->getOperand(1).getCImm()->getSExtValue());
2575 }
Amara Emerson1abe05c2019-02-21 20:20:16 +00002576 }
2577}
2578
2579unsigned
2580AArch64InstructionSelector::emitConstantPoolEntry(Constant *CPVal,
2581 MachineFunction &MF) const {
Hans Wennborg5d5ee4a2019-04-26 08:31:00 +00002582 Type *CPTy = CPVal->getType();
Amara Emerson1abe05c2019-02-21 20:20:16 +00002583 unsigned Align = MF.getDataLayout().getPrefTypeAlignment(CPTy);
2584 if (Align == 0)
2585 Align = MF.getDataLayout().getTypeAllocSize(CPTy);
2586
2587 MachineConstantPool *MCP = MF.getConstantPool();
2588 return MCP->getConstantPoolIndex(CPVal, Align);
2589}
2590
2591MachineInstr *AArch64InstructionSelector::emitLoadFromConstantPool(
2592 Constant *CPVal, MachineIRBuilder &MIRBuilder) const {
2593 unsigned CPIdx = emitConstantPoolEntry(CPVal, MIRBuilder.getMF());
2594
2595 auto Adrp =
2596 MIRBuilder.buildInstr(AArch64::ADRP, {&AArch64::GPR64RegClass}, {})
2597 .addConstantPoolIndex(CPIdx, 0, AArch64II::MO_PAGE);
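  // The ADRP materializes the 4KB page of the constant-pool entry; the load
  // below then applies the low 12 bits via MO_PAGEOFF | MO_NC.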
Amara Emerson8acb0d92019-03-04 19:16:00 +00002598
2599 MachineInstr *LoadMI = nullptr;
2600 switch (MIRBuilder.getDataLayout().getTypeStoreSize(CPVal->getType())) {
2601 case 16:
2602 LoadMI =
2603 &*MIRBuilder
2604 .buildInstr(AArch64::LDRQui, {&AArch64::FPR128RegClass}, {Adrp})
2605 .addConstantPoolIndex(CPIdx, 0,
2606 AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2607 break;
2608 case 8:
2609 LoadMI = &*MIRBuilder
2610 .buildInstr(AArch64::LDRDui, {&AArch64::FPR64RegClass}, {Adrp})
2611 .addConstantPoolIndex(
2612 CPIdx, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2613 break;
2614 default:
2615 LLVM_DEBUG(dbgs() << "Could not load from constant pool of type "
2616 << *CPVal->getType());
2617 return nullptr;
2618 }
Amara Emerson1abe05c2019-02-21 20:20:16 +00002619 constrainSelectedInstRegOperands(*Adrp, TII, TRI, RBI);
Amara Emerson8acb0d92019-03-04 19:16:00 +00002620 constrainSelectedInstRegOperands(*LoadMI, TII, TRI, RBI);
2621 return LoadMI;
2622}
2623
2624/// Return an <Opcode, SubregIndex> pair to do a vector elt insert of a given
2625/// size and RB.
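/// For example (illustrative), a 32-bit element on the GPR bank maps to
/// INSvi32gpr with the ssub subregister, while the same width on the FPR bank
/// maps to INSvi32lane.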
2626static std::pair<unsigned, unsigned>
2627getInsertVecEltOpInfo(const RegisterBank &RB, unsigned EltSize) {
2628 unsigned Opc, SubregIdx;
2629 if (RB.getID() == AArch64::GPRRegBankID) {
2630 if (EltSize == 32) {
2631 Opc = AArch64::INSvi32gpr;
2632 SubregIdx = AArch64::ssub;
2633 } else if (EltSize == 64) {
2634 Opc = AArch64::INSvi64gpr;
2635 SubregIdx = AArch64::dsub;
2636 } else {
2637 llvm_unreachable("invalid elt size!");
2638 }
2639 } else {
2640 if (EltSize == 8) {
2641 Opc = AArch64::INSvi8lane;
2642 SubregIdx = AArch64::bsub;
2643 } else if (EltSize == 16) {
2644 Opc = AArch64::INSvi16lane;
2645 SubregIdx = AArch64::hsub;
2646 } else if (EltSize == 32) {
2647 Opc = AArch64::INSvi32lane;
2648 SubregIdx = AArch64::ssub;
2649 } else if (EltSize == 64) {
2650 Opc = AArch64::INSvi64lane;
2651 SubregIdx = AArch64::dsub;
2652 } else {
2653 llvm_unreachable("invalid elt size!");
2654 }
2655 }
2656 return std::make_pair(Opc, SubregIdx);
2657}
2658
2659MachineInstr *AArch64InstructionSelector::emitVectorConcat(
Amara Emerson2ff22982019-03-14 22:48:15 +00002660 Optional<unsigned> Dst, unsigned Op1, unsigned Op2,
2661 MachineIRBuilder &MIRBuilder) const {
Amara Emerson8acb0d92019-03-04 19:16:00 +00002662 // We implement a vector concat by:
2663 // 1. Use scalar_to_vector to insert the lower vector into the larger dest
2664 // 2. Insert the upper vector into the destination's upper element
2665 // TODO: some of this code is common with G_BUILD_VECTOR handling.
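  // For two 64-bit operands this amounts to, roughly (illustrative): widen each
  // operand into a 128-bit register, then INSvi64lane the second operand into
  // lane 1 of the widened first operand.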
2666 MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
2667
2668 const LLT Op1Ty = MRI.getType(Op1);
2669 const LLT Op2Ty = MRI.getType(Op2);
2670
2671 if (Op1Ty != Op2Ty) {
2672 LLVM_DEBUG(dbgs() << "Could not do vector concat of differing vector tys");
2673 return nullptr;
2674 }
2675 assert(Op1Ty.isVector() && "Expected a vector for vector concat");
2676
2677 if (Op1Ty.getSizeInBits() >= 128) {
2678 LLVM_DEBUG(dbgs() << "Vector concat not supported for full size vectors");
2679 return nullptr;
2680 }
2681
2682 // At the moment we just support 64 bit vector concats.
2683 if (Op1Ty.getSizeInBits() != 64) {
2684    LLVM_DEBUG(dbgs() << "Vector concat only supported for 64b vectors");
2685 return nullptr;
2686 }
2687
2688 const LLT ScalarTy = LLT::scalar(Op1Ty.getSizeInBits());
2689 const RegisterBank &FPRBank = *RBI.getRegBank(Op1, MRI, TRI);
2690 const TargetRegisterClass *DstRC =
2691 getMinClassForRegBank(FPRBank, Op1Ty.getSizeInBits() * 2);
2692
2693 MachineInstr *WidenedOp1 =
2694 emitScalarToVector(ScalarTy.getSizeInBits(), DstRC, Op1, MIRBuilder);
2695 MachineInstr *WidenedOp2 =
2696 emitScalarToVector(ScalarTy.getSizeInBits(), DstRC, Op2, MIRBuilder);
2697 if (!WidenedOp1 || !WidenedOp2) {
2698 LLVM_DEBUG(dbgs() << "Could not emit a vector from scalar value");
2699 return nullptr;
2700 }
2701
2702 // Now do the insert of the upper element.
2703 unsigned InsertOpc, InsSubRegIdx;
2704 std::tie(InsertOpc, InsSubRegIdx) =
2705 getInsertVecEltOpInfo(FPRBank, ScalarTy.getSizeInBits());
2706
Amara Emerson2ff22982019-03-14 22:48:15 +00002707 if (!Dst)
2708 Dst = MRI.createVirtualRegister(DstRC);
Amara Emerson8acb0d92019-03-04 19:16:00 +00002709 auto InsElt =
2710 MIRBuilder
Amara Emerson2ff22982019-03-14 22:48:15 +00002711 .buildInstr(InsertOpc, {*Dst}, {WidenedOp1->getOperand(0).getReg()})
Amara Emerson8acb0d92019-03-04 19:16:00 +00002712 .addImm(1) /* Lane index */
2713 .addUse(WidenedOp2->getOperand(0).getReg())
2714 .addImm(0);
Amara Emerson8acb0d92019-03-04 19:16:00 +00002715 constrainSelectedInstRegOperands(*InsElt, TII, TRI, RBI);
2716 return &*InsElt;
Amara Emerson1abe05c2019-02-21 20:20:16 +00002717}
2718
Amara Emerson761ca2e2019-03-19 21:43:05 +00002719bool AArch64InstructionSelector::tryOptVectorDup(MachineInstr &I) const {
2720 // Try to match a vector splat operation into a dup instruction.
2721 // We're looking for this pattern:
2722 // %scalar:gpr(s64) = COPY $x0
2723 // %undef:fpr(<2 x s64>) = G_IMPLICIT_DEF
2724 // %cst0:gpr(s32) = G_CONSTANT i32 0
2725 // %zerovec:fpr(<2 x s32>) = G_BUILD_VECTOR %cst0(s32), %cst0(s32)
2726 // %ins:fpr(<2 x s64>) = G_INSERT_VECTOR_ELT %undef, %scalar(s64), %cst0(s32)
2727 // %splat:fpr(<2 x s64>) = G_SHUFFLE_VECTOR %ins(<2 x s64>), %undef,
2728 // %zerovec(<2 x s32>)
2729 //
2730 // ...into:
2731 // %splat = DUP %scalar
2732 // We use the regbank of the scalar to determine which kind of dup to use.
2733 MachineIRBuilder MIB(I);
2734 MachineRegisterInfo &MRI = *MIB.getMRI();
2735 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
2736 using namespace TargetOpcode;
2737 using namespace MIPatternMatch;
2738
2739 // Begin matching the insert.
2740 auto *InsMI =
2741 findMIFromReg(I.getOperand(1).getReg(), G_INSERT_VECTOR_ELT, MIB);
2742 if (!InsMI)
2743 return false;
2744 // Match the undef vector operand.
2745 auto *UndefMI =
2746 findMIFromReg(InsMI->getOperand(1).getReg(), G_IMPLICIT_DEF, MIB);
2747 if (!UndefMI)
2748 return false;
2749 // Match the scalar being splatted.
2750 unsigned ScalarReg = InsMI->getOperand(2).getReg();
2751 const RegisterBank *ScalarRB = RBI.getRegBank(ScalarReg, MRI, TRI);
2752 // Match the index constant 0.
2753 int64_t Index = 0;
2754 if (!mi_match(InsMI->getOperand(3).getReg(), MRI, m_ICst(Index)) || Index)
2755 return false;
2756
2757 // The shuffle's second operand doesn't matter if the mask is all zero.
2758 auto *ZeroVec = findMIFromReg(I.getOperand(3).getReg(), G_BUILD_VECTOR, MIB);
2759 if (!ZeroVec)
2760 return false;
2761 int64_t Zero = 0;
2762 if (!mi_match(ZeroVec->getOperand(1).getReg(), MRI, m_ICst(Zero)) || Zero)
2763 return false;
2764 for (unsigned i = 1, e = ZeroVec->getNumOperands() - 1; i < e; ++i) {
2765 if (ZeroVec->getOperand(i).getReg() != ZeroVec->getOperand(1).getReg())
2766 return false; // This wasn't an all zeros vector.
2767 }
2768
2769 // We're done, now find out what kind of splat we need.
2770 LLT VecTy = MRI.getType(I.getOperand(0).getReg());
2771 LLT EltTy = VecTy.getElementType();
2772 if (VecTy.getSizeInBits() != 128 || EltTy.getSizeInBits() < 32) {
2773    LLVM_DEBUG(dbgs() << "Could not optimize splat pattern < 128b or with "
                             "elt size < 32b yet");
2774 return false;
2775 }
2776 bool IsFP = ScalarRB->getID() == AArch64::FPRRegBankID;
2777 static const unsigned OpcTable[2][2] = {
2778 {AArch64::DUPv4i32gpr, AArch64::DUPv2i64gpr},
2779 {AArch64::DUPv4i32lane, AArch64::DUPv2i64lane}};
2780 unsigned Opc = OpcTable[IsFP][EltTy.getSizeInBits() == 64];
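  // (Illustrative) a 64-bit scalar on the GPR bank selects DUPv2i64gpr above,
  // while a 32-bit scalar on the FPR bank selects DUPv4i32lane.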
2781
2782 // For FP splats, we need to widen the scalar reg via undef too.
2783 if (IsFP) {
2784 MachineInstr *Widen = emitScalarToVector(
2785 EltTy.getSizeInBits(), &AArch64::FPR128RegClass, ScalarReg, MIB);
2786 if (!Widen)
2787 return false;
2788 ScalarReg = Widen->getOperand(0).getReg();
2789 }
2790 auto Dup = MIB.buildInstr(Opc, {I.getOperand(0).getReg()}, {ScalarReg});
2791 if (IsFP)
2792 Dup.addImm(0);
2793 constrainSelectedInstRegOperands(*Dup, TII, TRI, RBI);
2794 I.eraseFromParent();
2795 return true;
2796}
2797
2798bool AArch64InstructionSelector::tryOptVectorShuffle(MachineInstr &I) const {
2799 if (TM.getOptLevel() == CodeGenOpt::None)
2800 return false;
2801 if (tryOptVectorDup(I))
2802 return true;
2803 return false;
2804}
2805
Amara Emerson1abe05c2019-02-21 20:20:16 +00002806bool AArch64InstructionSelector::selectShuffleVector(
2807 MachineInstr &I, MachineRegisterInfo &MRI) const {
Amara Emerson761ca2e2019-03-19 21:43:05 +00002808 if (tryOptVectorShuffle(I))
2809 return true;
Amara Emerson1abe05c2019-02-21 20:20:16 +00002810 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
2811 unsigned Src1Reg = I.getOperand(1).getReg();
2812 const LLT Src1Ty = MRI.getType(Src1Reg);
2813 unsigned Src2Reg = I.getOperand(2).getReg();
2814 const LLT Src2Ty = MRI.getType(Src2Reg);
2815
2816 MachineBasicBlock &MBB = *I.getParent();
2817 MachineFunction &MF = *MBB.getParent();
2818 LLVMContext &Ctx = MF.getFunction().getContext();
2819
2820 // G_SHUFFLE_VECTOR doesn't really have a strictly enforced constant mask
2821  // operand; it comes in as a normal vector value that we have to analyze to
Amara Emerson2806fd02019-04-12 21:31:21 +00002822 // find the mask indices. If the mask element is undef, then
2823 // collectShuffleMaskIndices() will add a None entry for that index into
2824 // the list.
2825 SmallVector<Optional<int>, 8> Mask;
Amara Emerson1abe05c2019-02-21 20:20:16 +00002826 collectShuffleMaskIndices(I, MRI, Mask);
2827 assert(!Mask.empty() && "Expected to find mask indices");
2828
2829 // G_SHUFFLE_VECTOR is weird in that the source operands can be scalars, if
2830 // it's originated from a <1 x T> type. Those should have been lowered into
2831  // it originated from a <1 x T> type. Those should have been lowered into
2832 if (!Src1Ty.isVector() || !Src2Ty.isVector()) {
2833 LLVM_DEBUG(dbgs() << "Could not select a \"scalar\" G_SHUFFLE_VECTOR\n");
2834 return false;
2835 }
2836
2837 unsigned BytesPerElt = DstTy.getElementType().getSizeInBits() / 8;
2838
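  // Each mask element is expanded into BytesPerElt consecutive byte indices for
  // TBL below; e.g. (illustrative) with 32-bit elements, a mask value of 2
  // expands to byte indices 8, 9, 10 and 11.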
2839 SmallVector<Constant *, 64> CstIdxs;
Amara Emerson2806fd02019-04-12 21:31:21 +00002840 for (auto &MaybeVal : Mask) {
2841    // For now, we'll just assume any undef indexes are 0. This should be
2842    // optimized in the future, e.g. to select DUP etc.
2843 int Val = MaybeVal.hasValue() ? *MaybeVal : 0;
Amara Emerson1abe05c2019-02-21 20:20:16 +00002844 for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) {
2845 unsigned Offset = Byte + Val * BytesPerElt;
2846 CstIdxs.emplace_back(ConstantInt::get(Type::getInt8Ty(Ctx), Offset));
2847 }
2848 }
2849
Amara Emerson8acb0d92019-03-04 19:16:00 +00002850 MachineIRBuilder MIRBuilder(I);
Amara Emerson1abe05c2019-02-21 20:20:16 +00002851
2852 // Use a constant pool to load the index vector for TBL.
2853 Constant *CPVal = ConstantVector::get(CstIdxs);
Amara Emerson1abe05c2019-02-21 20:20:16 +00002854 MachineInstr *IndexLoad = emitLoadFromConstantPool(CPVal, MIRBuilder);
2855 if (!IndexLoad) {
2856 LLVM_DEBUG(dbgs() << "Could not load from a constant pool");
2857 return false;
2858 }
2859
Amara Emerson8acb0d92019-03-04 19:16:00 +00002860 if (DstTy.getSizeInBits() != 128) {
2861 assert(DstTy.getSizeInBits() == 64 && "Unexpected shuffle result ty");
2862 // This case can be done with TBL1.
Amara Emerson2ff22982019-03-14 22:48:15 +00002863 MachineInstr *Concat = emitVectorConcat(None, Src1Reg, Src2Reg, MIRBuilder);
Amara Emerson8acb0d92019-03-04 19:16:00 +00002864 if (!Concat) {
2865 LLVM_DEBUG(dbgs() << "Could not do vector concat for tbl1");
2866 return false;
2867 }
2868
2869 // The constant pool load will be 64 bits, so need to convert to FPR128 reg.
2870    // The constant pool load will be 64 bits, so we need to convert it to an FPR128 reg.
2871 emitScalarToVector(64, &AArch64::FPR128RegClass,
2872 IndexLoad->getOperand(0).getReg(), MIRBuilder);
2873
2874 auto TBL1 = MIRBuilder.buildInstr(
2875 AArch64::TBLv16i8One, {&AArch64::FPR128RegClass},
2876 {Concat->getOperand(0).getReg(), IndexLoad->getOperand(0).getReg()});
2877 constrainSelectedInstRegOperands(*TBL1, TII, TRI, RBI);
2878
Amara Emerson3739a202019-03-15 21:59:50 +00002879 auto Copy =
Amara Emerson86271782019-03-18 19:20:10 +00002880 MIRBuilder
2881 .buildInstr(TargetOpcode::COPY, {I.getOperand(0).getReg()}, {})
2882 .addReg(TBL1.getReg(0), 0, AArch64::dsub);
Amara Emerson8acb0d92019-03-04 19:16:00 +00002883 RBI.constrainGenericRegister(Copy.getReg(0), AArch64::FPR64RegClass, MRI);
2884 I.eraseFromParent();
2885 return true;
2886 }
2887
Amara Emerson1abe05c2019-02-21 20:20:16 +00002888 // For TBL2 we need to emit a REG_SEQUENCE to tie together two consecutive
2889 // Q registers for regalloc.
2890 auto RegSeq = MIRBuilder
2891 .buildInstr(TargetOpcode::REG_SEQUENCE,
2892 {&AArch64::QQRegClass}, {Src1Reg})
2893 .addImm(AArch64::qsub0)
2894 .addUse(Src2Reg)
2895 .addImm(AArch64::qsub1);
2896
2897 auto TBL2 =
2898 MIRBuilder.buildInstr(AArch64::TBLv16i8Two, {I.getOperand(0).getReg()},
2899 {RegSeq, IndexLoad->getOperand(0).getReg()});
2900 constrainSelectedInstRegOperands(*RegSeq, TII, TRI, RBI);
2901 constrainSelectedInstRegOperands(*TBL2, TII, TRI, RBI);
2902 I.eraseFromParent();
2903 return true;
2904}
2905
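/// Inserts EltReg into lane LaneIdx of SrcReg, defining DstReg (a fresh FPR128
/// virtual register is created if none is given). Illustrative summary: on the
/// FPR bank the element is first widened via emitScalarToVector and inserted
/// with an INSvi*lane instruction; on the GPR bank an INSvi*gpr is used
/// directly.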
Jessica Paquette16d67a32019-03-13 23:22:23 +00002906MachineInstr *AArch64InstructionSelector::emitLaneInsert(
2907 Optional<unsigned> DstReg, unsigned SrcReg, unsigned EltReg,
2908 unsigned LaneIdx, const RegisterBank &RB,
2909 MachineIRBuilder &MIRBuilder) const {
2910 MachineInstr *InsElt = nullptr;
2911 const TargetRegisterClass *DstRC = &AArch64::FPR128RegClass;
2912 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
2913
2914 // Create a register to define with the insert if one wasn't passed in.
2915 if (!DstReg)
2916 DstReg = MRI.createVirtualRegister(DstRC);
2917
2918 unsigned EltSize = MRI.getType(EltReg).getSizeInBits();
2919 unsigned Opc = getInsertVecEltOpInfo(RB, EltSize).first;
2920
2921 if (RB.getID() == AArch64::FPRRegBankID) {
2922 auto InsSub = emitScalarToVector(EltSize, DstRC, EltReg, MIRBuilder);
2923 InsElt = MIRBuilder.buildInstr(Opc, {*DstReg}, {SrcReg})
2924 .addImm(LaneIdx)
2925 .addUse(InsSub->getOperand(0).getReg())
2926 .addImm(0);
2927 } else {
2928 InsElt = MIRBuilder.buildInstr(Opc, {*DstReg}, {SrcReg})
2929 .addImm(LaneIdx)
2930 .addUse(EltReg);
2931 }
2932
2933 constrainSelectedInstRegOperands(*InsElt, TII, TRI, RBI);
2934 return InsElt;
2935}
2936
Jessica Paquette5aff1f42019-03-14 18:01:30 +00002937bool AArch64InstructionSelector::selectInsertElt(
2938 MachineInstr &I, MachineRegisterInfo &MRI) const {
2939 assert(I.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT);
2940
2941 // Get information on the destination.
2942 unsigned DstReg = I.getOperand(0).getReg();
2943 const LLT DstTy = MRI.getType(DstReg);
Jessica Paquetted3ffd472019-03-29 21:39:36 +00002944 unsigned VecSize = DstTy.getSizeInBits();
Jessica Paquette5aff1f42019-03-14 18:01:30 +00002945
2946 // Get information on the element we want to insert into the destination.
2947 unsigned EltReg = I.getOperand(2).getReg();
2948 const LLT EltTy = MRI.getType(EltReg);
2949 unsigned EltSize = EltTy.getSizeInBits();
2950 if (EltSize < 16 || EltSize > 64)
2951 return false; // Don't support all element types yet.
2952
2953 // Find the definition of the index. Bail out if it's not defined by a
2954 // G_CONSTANT.
2955 unsigned IdxReg = I.getOperand(3).getReg();
Jessica Paquette76f64b62019-04-26 21:53:13 +00002956 auto VRegAndVal = getConstantVRegValWithLookThrough(IdxReg, MRI);
2957 if (!VRegAndVal)
Jessica Paquette5aff1f42019-03-14 18:01:30 +00002958 return false;
Jessica Paquette76f64b62019-04-26 21:53:13 +00002959 unsigned LaneIdx = VRegAndVal->Value;
Jessica Paquette5aff1f42019-03-14 18:01:30 +00002960
2961 // Perform the lane insert.
2962 unsigned SrcReg = I.getOperand(1).getReg();
2963 const RegisterBank &EltRB = *RBI.getRegBank(EltReg, MRI, TRI);
2964 MachineIRBuilder MIRBuilder(I);
Jessica Paquetted3ffd472019-03-29 21:39:36 +00002965
2966 if (VecSize < 128) {
2967 // If the vector we're inserting into is smaller than 128 bits, widen it
2968 // to 128 to do the insert.
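    // e.g. (illustrative) a <2 x s32> vector is widened into an FPR128 register
    // here, and the result is copied back out through the dsub subregister
    // below.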
2969 MachineInstr *ScalarToVec = emitScalarToVector(
2970 VecSize, &AArch64::FPR128RegClass, SrcReg, MIRBuilder);
2971 if (!ScalarToVec)
2972 return false;
2973 SrcReg = ScalarToVec->getOperand(0).getReg();
2974 }
2975
2976 // Create an insert into a new FPR128 register.
2977 // Note that if our vector is already 128 bits, we end up emitting an extra
2978 // register.
2979 MachineInstr *InsMI =
2980 emitLaneInsert(None, SrcReg, EltReg, LaneIdx, EltRB, MIRBuilder);
2981
2982 if (VecSize < 128) {
2983 // If we had to widen to perform the insert, then we have to demote back to
2984 // the original size to get the result we want.
2985 unsigned DemoteVec = InsMI->getOperand(0).getReg();
2986 const TargetRegisterClass *RC =
2987 getMinClassForRegBank(*RBI.getRegBank(DemoteVec, MRI, TRI), VecSize);
2988 if (RC != &AArch64::FPR32RegClass && RC != &AArch64::FPR64RegClass) {
2989 LLVM_DEBUG(dbgs() << "Unsupported register class!\n");
2990 return false;
2991 }
2992 unsigned SubReg = 0;
2993 if (!getSubRegForClass(RC, TRI, SubReg))
2994 return false;
2995 if (SubReg != AArch64::ssub && SubReg != AArch64::dsub) {
2996 LLVM_DEBUG(dbgs() << "Unsupported destination size! (" << VecSize
2997                        << ")\n");
2998 return false;
2999 }
3000 MIRBuilder.buildInstr(TargetOpcode::COPY, {DstReg}, {})
3001 .addReg(DemoteVec, 0, SubReg);
3002 RBI.constrainGenericRegister(DstReg, *RC, MRI);
3003 } else {
3004 // No widening needed.
3005 InsMI->getOperand(0).setReg(DstReg);
3006 constrainSelectedInstRegOperands(*InsMI, TII, TRI, RBI);
3007 }
3008
Jessica Paquette5aff1f42019-03-14 18:01:30 +00003009 I.eraseFromParent();
3010 return true;
3011}
3012
Amara Emerson5ec14602018-12-10 18:44:58 +00003013bool AArch64InstructionSelector::selectBuildVector(
3014 MachineInstr &I, MachineRegisterInfo &MRI) const {
3015 assert(I.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
3016 // Until we port more of the optimized selections, for now just use a vector
3017 // insert sequence.
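  // e.g. (illustrative) a <4 x s32> G_BUILD_VECTOR becomes a scalar-to-vector
  // of element 0 followed by three INSvi32* element inserts, one per remaining
  // element.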
3018 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
3019 const LLT EltTy = MRI.getType(I.getOperand(1).getReg());
3020 unsigned EltSize = EltTy.getSizeInBits();
Jessica Paquette245047d2019-01-24 22:00:41 +00003021 if (EltSize < 16 || EltSize > 64)
Amara Emerson5ec14602018-12-10 18:44:58 +00003022 return false; // Don't support all element types yet.
3023 const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI);
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00003024 MachineIRBuilder MIRBuilder(I);
Jessica Paquette245047d2019-01-24 22:00:41 +00003025
3026 const TargetRegisterClass *DstRC = &AArch64::FPR128RegClass;
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00003027 MachineInstr *ScalarToVec =
Amara Emerson8acb0d92019-03-04 19:16:00 +00003028 emitScalarToVector(DstTy.getElementType().getSizeInBits(), DstRC,
3029 I.getOperand(1).getReg(), MIRBuilder);
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00003030 if (!ScalarToVec)
Jessica Paquette245047d2019-01-24 22:00:41 +00003031 return false;
3032
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00003033 unsigned DstVec = ScalarToVec->getOperand(0).getReg();
Jessica Paquette245047d2019-01-24 22:00:41 +00003034 unsigned DstSize = DstTy.getSizeInBits();
3035
3036 // Keep track of the last MI we inserted. Later on, we might be able to save
3037 // a copy using it.
3038 MachineInstr *PrevMI = nullptr;
3039 for (unsigned i = 2, e = DstSize / EltSize + 1; i < e; ++i) {
Jessica Paquette16d67a32019-03-13 23:22:23 +00003040 // Note that if we don't do a subregister copy, we can end up making an
3041 // extra register.
3042 PrevMI = &*emitLaneInsert(None, DstVec, I.getOperand(i).getReg(), i - 1, RB,
3043 MIRBuilder);
3044 DstVec = PrevMI->getOperand(0).getReg();
Amara Emerson5ec14602018-12-10 18:44:58 +00003045 }
Jessica Paquette245047d2019-01-24 22:00:41 +00003046
3047 // If DstTy's size in bits is less than 128, then emit a subregister copy
3048 // from DstVec to the last register we've defined.
3049 if (DstSize < 128) {
Jessica Paquette85ace622019-03-13 23:29:54 +00003050 // Force this to be FPR using the destination vector.
3051 const TargetRegisterClass *RC =
3052 getMinClassForRegBank(*RBI.getRegBank(DstVec, MRI, TRI), DstSize);
Jessica Paquette245047d2019-01-24 22:00:41 +00003053 if (!RC)
3054 return false;
Jessica Paquette85ace622019-03-13 23:29:54 +00003055 if (RC != &AArch64::FPR32RegClass && RC != &AArch64::FPR64RegClass) {
3056 LLVM_DEBUG(dbgs() << "Unsupported register class!\n");
3057 return false;
3058 }
3059
3060 unsigned SubReg = 0;
3061 if (!getSubRegForClass(RC, TRI, SubReg))
3062 return false;
3063 if (SubReg != AArch64::ssub && SubReg != AArch64::dsub) {
3064 LLVM_DEBUG(dbgs() << "Unsupported destination size! (" << DstSize
3065                        << ")\n");
3066 return false;
3067 }
Jessica Paquette245047d2019-01-24 22:00:41 +00003068
3069 unsigned Reg = MRI.createVirtualRegister(RC);
3070 unsigned DstReg = I.getOperand(0).getReg();
3071
Amara Emerson86271782019-03-18 19:20:10 +00003072 MIRBuilder.buildInstr(TargetOpcode::COPY, {DstReg}, {})
3073 .addReg(DstVec, 0, SubReg);
Jessica Paquette245047d2019-01-24 22:00:41 +00003074 MachineOperand &RegOp = I.getOperand(1);
3075 RegOp.setReg(Reg);
3076 RBI.constrainGenericRegister(DstReg, *RC, MRI);
3077 } else {
3078 // We don't need a subregister copy. Save a copy by re-using the
3079 // destination register on the final insert.
3080 assert(PrevMI && "PrevMI was null?");
3081 PrevMI->getOperand(0).setReg(I.getOperand(0).getReg());
3082 constrainSelectedInstRegOperands(*PrevMI, TII, TRI, RBI);
3083 }
3084
Amara Emerson5ec14602018-12-10 18:44:58 +00003085 I.eraseFromParent();
3086 return true;
3087}
3088
Jessica Paquette7f6fe7c2019-04-29 20:58:17 +00003089/// Helper function to find an intrinsic ID on a MachineInstr. Returns the
3090/// ID if it exists, and 0 otherwise.
3091static unsigned findIntrinsicID(MachineInstr &I) {
3092 auto IntrinOp = find_if(I.operands(), [&](const MachineOperand &Op) {
3093 return Op.isIntrinsicID();
3094 });
3095 if (IntrinOp == I.operands_end())
3096 return 0;
3097 return IntrinOp->getIntrinsicID();
3098}
3099
Jessica Paquette22c62152019-04-02 19:57:26 +00003100/// Helper function to emit the correct opcode for a llvm.aarch64.stlxr
3101/// intrinsic.
3102static unsigned getStlxrOpcode(unsigned NumBytesToStore) {
3103 switch (NumBytesToStore) {
3104 // TODO: 1, 2, and 4 byte stores.
3105 case 8:
3106 return AArch64::STLXRX;
3107 default:
3108 LLVM_DEBUG(dbgs() << "Unexpected number of bytes to store! ("
3109 << NumBytesToStore << ")\n");
3110 break;
3111 }
3112 return 0;
3113}
3114
3115bool AArch64InstructionSelector::selectIntrinsicWithSideEffects(
3116 MachineInstr &I, MachineRegisterInfo &MRI) const {
3117 // Find the intrinsic ID.
Jessica Paquette7f6fe7c2019-04-29 20:58:17 +00003118 unsigned IntrinID = findIntrinsicID(I);
3119 if (!IntrinID)
Jessica Paquette22c62152019-04-02 19:57:26 +00003120 return false;
Jessica Paquette22c62152019-04-02 19:57:26 +00003121 MachineIRBuilder MIRBuilder(I);
3122
3123 // Select the instruction.
3124 switch (IntrinID) {
3125 default:
3126 return false;
3127 case Intrinsic::trap:
3128 MIRBuilder.buildInstr(AArch64::BRK, {}, {}).addImm(1);
3129 break;
3130 case Intrinsic::aarch64_stlxr:
3131 unsigned StatReg = I.getOperand(0).getReg();
3132 assert(RBI.getSizeInBits(StatReg, MRI, TRI) == 32 &&
3133 "Status register must be 32 bits!");
3134 unsigned SrcReg = I.getOperand(2).getReg();
3135
3136 if (RBI.getSizeInBits(SrcReg, MRI, TRI) != 64) {
3137 LLVM_DEBUG(dbgs() << "Only support 64-bit sources right now.\n");
3138 return false;
3139 }
3140
3141 unsigned PtrReg = I.getOperand(3).getReg();
3142 assert(MRI.getType(PtrReg).isPointer() && "Expected pointer operand");
3143
3144 // Expect only one memory operand.
3145 if (!I.hasOneMemOperand())
3146 return false;
3147
3148 const MachineMemOperand *MemOp = *I.memoperands_begin();
3149 unsigned NumBytesToStore = MemOp->getSize();
3150 unsigned Opc = getStlxrOpcode(NumBytesToStore);
3151 if (!Opc)
3152 return false;
3153
3154 auto StoreMI = MIRBuilder.buildInstr(Opc, {StatReg}, {SrcReg, PtrReg});
3155 constrainSelectedInstRegOperands(*StoreMI, TII, TRI, RBI);
3156 }
3157
3158 I.eraseFromParent();
3159 return true;
3160}
3161
Jessica Paquette7f6fe7c2019-04-29 20:58:17 +00003162bool AArch64InstructionSelector::selectIntrinsic(
3163 MachineInstr &I, MachineRegisterInfo &MRI) const {
3164 unsigned IntrinID = findIntrinsicID(I);
3165 if (!IntrinID)
3166 return false;
3167 MachineIRBuilder MIRBuilder(I);
3168
3169 switch (IntrinID) {
3170 default:
3171 break;
3172 case Intrinsic::aarch64_crypto_sha1h:
3173 unsigned DstReg = I.getOperand(0).getReg();
3174 unsigned SrcReg = I.getOperand(2).getReg();
3175
3176 // FIXME: Should this be an assert?
3177 if (MRI.getType(DstReg).getSizeInBits() != 32 ||
3178 MRI.getType(SrcReg).getSizeInBits() != 32)
3179 return false;
3180
3181 // The operation has to happen on FPRs. Set up some new FPR registers for
3182 // the source and destination if they are on GPRs.
3183 if (RBI.getRegBank(SrcReg, MRI, TRI)->getID() != AArch64::FPRRegBankID) {
3184 SrcReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);
3185 MIRBuilder.buildCopy({SrcReg}, {I.getOperand(2)});
3186
3187 // Make sure the copy ends up getting constrained properly.
3188 RBI.constrainGenericRegister(I.getOperand(2).getReg(),
3189 AArch64::GPR32RegClass, MRI);
3190 }
3191
3192 if (RBI.getRegBank(DstReg, MRI, TRI)->getID() != AArch64::FPRRegBankID)
3193 DstReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);
3194
3195 // Actually insert the instruction.
3196 auto SHA1Inst = MIRBuilder.buildInstr(AArch64::SHA1Hrr, {DstReg}, {SrcReg});
3197 constrainSelectedInstRegOperands(*SHA1Inst, TII, TRI, RBI);
3198
3199 // Did we create a new register for the destination?
3200 if (DstReg != I.getOperand(0).getReg()) {
3201 // Yep. Copy the result of the instruction back into the original
3202 // destination.
3203 MIRBuilder.buildCopy({I.getOperand(0)}, {DstReg});
3204 RBI.constrainGenericRegister(I.getOperand(0).getReg(),
3205 AArch64::GPR32RegClass, MRI);
3206 }
3207
3208 I.eraseFromParent();
3209 return true;
3210 }
3211 return false;
3212}
3213
Daniel Sanders8a4bae92017-03-14 21:32:08 +00003214/// SelectArithImmed - Select an immediate value that can be represented as
3215/// a 12-bit value shifted left by either 0 or 12. If so, return true with
3216/// Val set to the 12-bit value and Shift set to the shifter operand.
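/// For example (illustrative), 0xabc000 is selected as Val = 0xabc with an
/// LSL #12 shifter operand, while 0x1234567 cannot be encoded and is rejected.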
Daniel Sanders1e4569f2017-10-20 20:55:29 +00003217InstructionSelector::ComplexRendererFns
Daniel Sanders2deea182017-04-22 15:11:04 +00003218AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
Daniel Sanders8a4bae92017-03-14 21:32:08 +00003219 MachineInstr &MI = *Root.getParent();
3220 MachineBasicBlock &MBB = *MI.getParent();
3221 MachineFunction &MF = *MBB.getParent();
3222 MachineRegisterInfo &MRI = MF.getRegInfo();
3223
3224 // This function is called from the addsub_shifted_imm ComplexPattern,
3225 // which lists [imm] as the list of opcode it's interested in, however
3226 // we still need to check whether the operand is actually an immediate
3227 // here because the ComplexPattern opcode list is only used in
3228 // root-level opcode matching.
3229 uint64_t Immed;
3230 if (Root.isImm())
3231 Immed = Root.getImm();
3232 else if (Root.isCImm())
3233 Immed = Root.getCImm()->getZExtValue();
3234 else if (Root.isReg()) {
3235 MachineInstr *Def = MRI.getVRegDef(Root.getReg());
3236 if (Def->getOpcode() != TargetOpcode::G_CONSTANT)
Daniel Sandersdf39cba2017-10-15 18:22:54 +00003237 return None;
Daniel Sanders0e642022017-03-16 18:04:50 +00003238 MachineOperand &Op1 = Def->getOperand(1);
3239 if (!Op1.isCImm() || Op1.getCImm()->getBitWidth() > 64)
Daniel Sandersdf39cba2017-10-15 18:22:54 +00003240 return None;
Daniel Sanders0e642022017-03-16 18:04:50 +00003241 Immed = Op1.getCImm()->getZExtValue();
Daniel Sanders8a4bae92017-03-14 21:32:08 +00003242 } else
Daniel Sandersdf39cba2017-10-15 18:22:54 +00003243 return None;
Daniel Sanders8a4bae92017-03-14 21:32:08 +00003244
3245 unsigned ShiftAmt;
3246
3247 if (Immed >> 12 == 0) {
3248 ShiftAmt = 0;
3249 } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
3250 ShiftAmt = 12;
3251 Immed = Immed >> 12;
3252 } else
Daniel Sandersdf39cba2017-10-15 18:22:54 +00003253 return None;
Daniel Sanders8a4bae92017-03-14 21:32:08 +00003254
3255 unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
Daniel Sandersdf39cba2017-10-15 18:22:54 +00003256 return {{
3257 [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); },
3258 [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); },
3259 }};
Daniel Sanders8a4bae92017-03-14 21:32:08 +00003260}
Daniel Sanders0b5293f2017-04-06 09:49:34 +00003261
Daniel Sandersea8711b2017-10-16 03:36:29 +00003262/// Select a "register plus unscaled signed 9-bit immediate" address. This
3263/// should only match when there is an offset that is not valid for a scaled
3264/// immediate addressing mode. The "Size" argument is the size in bytes of the
3265/// memory reference, which is needed here to know what is valid for a scaled
3266/// immediate.
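/// For example (illustrative), a 64-bit load at base + 7 cannot use the scaled
/// 12-bit immediate form (7 is not a multiple of 8), but is accepted here since
/// -256 <= 7 < 256.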
Daniel Sanders1e4569f2017-10-20 20:55:29 +00003267InstructionSelector::ComplexRendererFns
Daniel Sandersea8711b2017-10-16 03:36:29 +00003268AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root,
3269 unsigned Size) const {
3270 MachineRegisterInfo &MRI =
3271 Root.getParent()->getParent()->getParent()->getRegInfo();
3272
3273 if (!Root.isReg())
3274 return None;
3275
3276 if (!isBaseWithConstantOffset(Root, MRI))
3277 return None;
3278
3279 MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
3280 if (!RootDef)
3281 return None;
3282
3283 MachineOperand &OffImm = RootDef->getOperand(2);
3284 if (!OffImm.isReg())
3285 return None;
3286 MachineInstr *RHS = MRI.getVRegDef(OffImm.getReg());
3287 if (!RHS || RHS->getOpcode() != TargetOpcode::G_CONSTANT)
3288 return None;
3289 int64_t RHSC;
3290 MachineOperand &RHSOp1 = RHS->getOperand(1);
3291 if (!RHSOp1.isCImm() || RHSOp1.getCImm()->getBitWidth() > 64)
3292 return None;
3293 RHSC = RHSOp1.getCImm()->getSExtValue();
3294
3295 // If the offset is valid as a scaled immediate, don't match here.
3296 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Log2_32(Size)))
3297 return None;
3298 if (RHSC >= -256 && RHSC < 256) {
3299 MachineOperand &Base = RootDef->getOperand(1);
3300 return {{
3301 [=](MachineInstrBuilder &MIB) { MIB.add(Base); },
3302 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
3303 }};
3304 }
3305 return None;
3306}
3307
3308/// Select a "register plus scaled unsigned 12-bit immediate" address. The
3309/// "Size" argument is the size in bytes of the memory reference, which
3310/// determines the scale.
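/// For example (illustrative), with Size == 8 a base plus a constant offset of
/// 16 is matched as the base register with an immediate of 2 (16 >> 3).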
Daniel Sanders1e4569f2017-10-20 20:55:29 +00003311InstructionSelector::ComplexRendererFns
Daniel Sandersea8711b2017-10-16 03:36:29 +00003312AArch64InstructionSelector::selectAddrModeIndexed(MachineOperand &Root,
3313 unsigned Size) const {
3314 MachineRegisterInfo &MRI =
3315 Root.getParent()->getParent()->getParent()->getRegInfo();
3316
3317 if (!Root.isReg())
3318 return None;
3319
3320 MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
3321 if (!RootDef)
3322 return None;
3323
3324 if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
3325 return {{
3326 [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
3327 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
3328 }};
3329 }
3330
3331 if (isBaseWithConstantOffset(Root, MRI)) {
3332 MachineOperand &LHS = RootDef->getOperand(1);
3333 MachineOperand &RHS = RootDef->getOperand(2);
3334 MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
3335 MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
3336 if (LHSDef && RHSDef) {
3337 int64_t RHSC = (int64_t)RHSDef->getOperand(1).getCImm()->getZExtValue();
3338 unsigned Scale = Log2_32(Size);
3339 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
3340 if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
Daniel Sanders01805b62017-10-16 05:39:30 +00003341 return {{
3342 [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
3343 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
3344 }};
3345
Daniel Sandersea8711b2017-10-16 03:36:29 +00003346 return {{
3347 [=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
3348 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
3349 }};
3350 }
3351 }
3352 }
3353
3354 // Before falling back to our general case, check if the unscaled
3355 // instructions can handle this. If so, that's preferable.
3356 if (selectAddrModeUnscaled(Root, Size).hasValue())
3357 return None;
3358
3359 return {{
3360 [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3361 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
3362 }};
3363}
3364
Volkan Kelesf7f25682018-01-16 18:44:05 +00003365void AArch64InstructionSelector::renderTruncImm(MachineInstrBuilder &MIB,
3366 const MachineInstr &MI) const {
3367 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
3368 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
3369 Optional<int64_t> CstVal = getConstantVRegVal(MI.getOperand(0).getReg(), MRI);
3370 assert(CstVal && "Expected constant value");
3371 MIB.addImm(CstVal.getValue());
3372}
3373
Daniel Sanders0b5293f2017-04-06 09:49:34 +00003374namespace llvm {
3375InstructionSelector *
3376createAArch64InstructionSelector(const AArch64TargetMachine &TM,
3377 AArch64Subtarget &Subtarget,
3378 AArch64RegisterBankInfo &RBI) {
3379 return new AArch64InstructionSelector(TM, Subtarget, RBI);
3380}
3381}