//===- AArch64InstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "aarch64-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class AArch64InstructionSelector : public InstructionSelector {
public:
  AArch64InstructionSelector(const AArch64TargetMachine &TM,
                             const AArch64Subtarget &STI,
                             const AArch64RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
                          MachineRegisterInfo &MRI) const;
  bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectVectorASHR(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectVectorSHL(MachineInstr &I, MachineRegisterInfo &MRI) const;

  // Helper to generate an equivalent of scalar_to_vector into a new register,
  // returned via 'Dst'.
  MachineInstr *emitScalarToVector(unsigned EltSize,
                                   const TargetRegisterClass *DstRC,
                                   unsigned Scalar,
                                   MachineIRBuilder &MIRBuilder) const;

  /// Emit a lane insert into \p DstReg, or a new vector register if None is
  /// provided.
  ///
  /// The lane inserted into is defined by \p LaneIdx. The vector source
  /// register is given by \p SrcReg. The register containing the element is
  /// given by \p EltReg.
  MachineInstr *emitLaneInsert(Optional<unsigned> DstReg, unsigned SrcReg,
                               unsigned EltReg, unsigned LaneIdx,
                               const RegisterBank &RB,
                               MachineIRBuilder &MIRBuilder) const;
  bool selectInsertElt(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectBuildVector(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;

  void collectShuffleMaskIndices(MachineInstr &I, MachineRegisterInfo &MRI,
                                 SmallVectorImpl<Optional<int>> &Idxs) const;
  bool selectShuffleVector(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectExtractElt(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectConcatVectors(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectSplitVectorUnmerge(MachineInstr &I,
                                MachineRegisterInfo &MRI) const;
  bool selectIntrinsicWithSideEffects(MachineInstr &I,
                                      MachineRegisterInfo &MRI) const;
  bool selectIntrinsic(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectVectorICmp(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectIntrinsicTrunc(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectIntrinsicRound(MachineInstr &I, MachineRegisterInfo &MRI) const;
  unsigned emitConstantPoolEntry(Constant *CPVal, MachineFunction &MF) const;
  MachineInstr *emitLoadFromConstantPool(Constant *CPVal,
                                         MachineIRBuilder &MIRBuilder) const;

  // Emit a vector concat operation.
  MachineInstr *emitVectorConcat(Optional<unsigned> Dst, unsigned Op1,
                                 unsigned Op2,
                                 MachineIRBuilder &MIRBuilder) const;
  MachineInstr *emitExtractVectorElt(Optional<unsigned> DstReg,
                                     const RegisterBank &DstRB, LLT ScalarTy,
                                     unsigned VecReg, unsigned LaneIdx,
                                     MachineIRBuilder &MIRBuilder) const;

  /// Helper function for selecting G_FCONSTANT. If the G_FCONSTANT can be
  /// materialized using a FMOV instruction, then update MI and return it.
  /// Otherwise, do nothing and return a nullptr.
  MachineInstr *emitFMovForFConstant(MachineInstr &MI,
                                     MachineRegisterInfo &MRI) const;

  ComplexRendererFns selectArithImmed(MachineOperand &Root) const;

  ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root,
                                            unsigned Size) const;

  ComplexRendererFns selectAddrModeUnscaled8(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 1);
  }
  ComplexRendererFns selectAddrModeUnscaled16(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 2);
  }
  ComplexRendererFns selectAddrModeUnscaled32(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 4);
  }
  ComplexRendererFns selectAddrModeUnscaled64(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 8);
  }
  ComplexRendererFns selectAddrModeUnscaled128(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 16);
  }

  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root,
                                           unsigned Size) const;
  template <int Width>
  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root) const {
    return selectAddrModeIndexed(Root, Width / 8);
  }

  void renderTruncImm(MachineInstrBuilder &MIB, const MachineInstr &MI) const;

  // Materialize a GlobalValue or BlockAddress using a movz+movk sequence.
  void materializeLargeCMVal(MachineInstr &I, const Value *V,
                             unsigned char OpFlags) const;

  // Optimization methods.

  // Helper function to check if a reg def is an MI with a given opcode and
  // returns it if so.
  MachineInstr *findMIFromReg(unsigned Reg, unsigned Opc,
                              MachineIRBuilder &MIB) const {
    auto *Def = MIB.getMRI()->getVRegDef(Reg);
    if (!Def || Def->getOpcode() != Opc)
      return nullptr;
    return Def;
  }

  bool tryOptVectorShuffle(MachineInstr &I) const;
  bool tryOptVectorDup(MachineInstr &MI) const;

  const AArch64TargetMachine &TM;
  const AArch64Subtarget &STI;
  const AArch64InstrInfo &TII;
  const AArch64RegisterInfo &TRI;
  const AArch64RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

// We declare the temporaries used by selectImpl() in the class to minimize the
// cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

AArch64InstructionSelector::AArch64InstructionSelector(
    const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
    const AArch64RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
static const TargetRegisterClass *
getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB,
                         const RegisterBankInfo &RBI,
                         bool GetAllRegSet = false) {
  if (RB.getID() == AArch64::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 32)
      return GetAllRegSet ? &AArch64::GPR32allRegClass
                          : &AArch64::GPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return GetAllRegSet ? &AArch64::GPR64allRegClass
                          : &AArch64::GPR64RegClass;
    return nullptr;
  }

  if (RB.getID() == AArch64::FPRRegBankID) {
    if (Ty.getSizeInBits() <= 16)
      return &AArch64::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &AArch64::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &AArch64::FPR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return &AArch64::FPR128RegClass;
    return nullptr;
  }

  return nullptr;
}

/// Given a register bank, and size in bits, return the smallest register class
/// that can represent that combination.
static const TargetRegisterClass *
getMinClassForRegBank(const RegisterBank &RB, unsigned SizeInBits,
                      bool GetAllRegSet = false) {
  unsigned RegBankID = RB.getID();

  if (RegBankID == AArch64::GPRRegBankID) {
    if (SizeInBits <= 32)
      return GetAllRegSet ? &AArch64::GPR32allRegClass
                          : &AArch64::GPR32RegClass;
    if (SizeInBits == 64)
      return GetAllRegSet ? &AArch64::GPR64allRegClass
                          : &AArch64::GPR64RegClass;
  }

  if (RegBankID == AArch64::FPRRegBankID) {
    switch (SizeInBits) {
    default:
      return nullptr;
    case 8:
      return &AArch64::FPR8RegClass;
    case 16:
      return &AArch64::FPR16RegClass;
    case 32:
      return &AArch64::FPR32RegClass;
    case 64:
      return &AArch64::FPR64RegClass;
    case 128:
      return &AArch64::FPR128RegClass;
    }
  }

  return nullptr;
}

/// Returns the correct subregister to use for a given register class.
static bool getSubRegForClass(const TargetRegisterClass *RC,
                              const TargetRegisterInfo &TRI, unsigned &SubReg) {
  switch (TRI.getRegSizeInBits(*RC)) {
  case 8:
    SubReg = AArch64::bsub;
    break;
  case 16:
    SubReg = AArch64::hsub;
    break;
  case 32:
    if (RC == &AArch64::GPR32RegClass)
      SubReg = AArch64::sub_32;
    else
      SubReg = AArch64::ssub;
    break;
  case 64:
    SubReg = AArch64::dsub;
    break;
  default:
    LLVM_DEBUG(
        dbgs() << "Couldn't find appropriate subregister for register class.");
    return false;
  }

  return true;
}

/// Check whether \p I is a currently unsupported binary operation:
/// - it has an unsized type
/// - an operand is not a vreg
/// - all operands are not in the same bank
/// These are checks that should someday live in the verifier, but right now,
/// these are mostly limitations of the aarch64 selector.
static bool unsupportedBinOp(const MachineInstr &I,
                             const AArch64RegisterBankInfo &RBI,
                             const MachineRegisterInfo &MRI,
                             const AArch64RegisterInfo &TRI) {
  LLT Ty = MRI.getType(I.getOperand(0).getReg());
  if (!Ty.isValid()) {
    LLVM_DEBUG(dbgs() << "Generic binop register should be typed\n");
    return true;
  }

  const RegisterBank *PrevOpBank = nullptr;
  for (auto &MO : I.operands()) {
    // FIXME: Support non-register operands.
    if (!MO.isReg()) {
      LLVM_DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
      return true;
    }

    // FIXME: Can generic operations have physical register operands? If
    // so, this will need to be taught about that, and we'll need to get the
    // bank out of the minimal class for the register.
    // Either way, this needs to be documented (and possibly verified).
    if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      LLVM_DEBUG(dbgs() << "Generic inst has physical register operand\n");
      return true;
    }

    const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
    if (!OpBank) {
      LLVM_DEBUG(dbgs() << "Generic register has no bank or class\n");
      return true;
    }

    if (PrevOpBank && OpBank != PrevOpBank) {
      LLVM_DEBUG(dbgs() << "Generic inst operands have different banks\n");
      return true;
    }
    PrevOpBank = OpBank;
  }
  return false;
}

/// Select the AArch64 opcode for the basic binary operation \p GenericOpc
/// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID
/// and of size \p OpSize.
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
                               unsigned OpSize) {
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    if (OpSize == 32) {
      switch (GenericOpc) {
      case TargetOpcode::G_SHL:
        return AArch64::LSLVWr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVWr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVWr;
      default:
        return GenericOpc;
      }
    } else if (OpSize == 64) {
      switch (GenericOpc) {
      case TargetOpcode::G_GEP:
        return AArch64::ADDXrr;
      case TargetOpcode::G_SHL:
        return AArch64::LSLVXr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVXr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVXr;
      default:
        return GenericOpc;
      }
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDSrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBSrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULSrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVSrr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDDrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBDrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULDrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVDrr;
      case TargetOpcode::G_OR:
        return AArch64::ORRv8i8;
      default:
        return GenericOpc;
      }
    }
    break;
  }
  return GenericOpc;
}

/// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc,
/// appropriate for the (value) register bank \p RegBankID and of memory access
/// size \p OpSize. This returns the variant with the base+unsigned-immediate
/// addressing mode (e.g., LDRXui).
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
                                    unsigned OpSize) {
  const bool isStore = GenericOpc == TargetOpcode::G_STORE;
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBBui : AArch64::LDRBBui;
    case 16:
      return isStore ? AArch64::STRHHui : AArch64::LDRHHui;
    case 32:
      return isStore ? AArch64::STRWui : AArch64::LDRWui;
    case 64:
      return isStore ? AArch64::STRXui : AArch64::LDRXui;
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBui : AArch64::LDRBui;
    case 16:
      return isStore ? AArch64::STRHui : AArch64::LDRHui;
    case 32:
      return isStore ? AArch64::STRSui : AArch64::LDRSui;
    case 64:
      return isStore ? AArch64::STRDui : AArch64::LDRDui;
    }
    break;
  }
  return GenericOpc;
}

#ifndef NDEBUG
/// Helper function that verifies that we have a valid copy at the end of
/// selectCopy. Verifies that the source and dest have the expected sizes and
/// then returns true.
static bool isValidCopy(const MachineInstr &I, const RegisterBank &DstBank,
                        const MachineRegisterInfo &MRI,
                        const TargetRegisterInfo &TRI,
                        const RegisterBankInfo &RBI) {
  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);

  // Make sure the size of the source and dest line up.
  assert(
      (DstSize == SrcSize ||
       // Copies are a means to set up initial types, the number of
       // bits may not exactly match.
       (TargetRegisterInfo::isPhysicalRegister(SrcReg) && DstSize <= SrcSize) ||
       // Copies are a means to copy bits around, as long as we are
       // on the same register class, that's fine. Otherwise, that
       // means we need some SUBREG_TO_REG or AND & co.
       (((DstSize + 31) / 32 == (SrcSize + 31) / 32) && DstSize > SrcSize)) &&
      "Copy with different width?!");

  // Check the size of the destination.
  assert((DstSize <= 64 || DstBank.getID() == AArch64::FPRRegBankID) &&
         "GPRs cannot get more than 64-bit width values");

  return true;
}
#endif

/// Helper function for selectCopy. Inserts a subregister copy from
/// \p *From to \p *To, linking it up to \p I.
///
/// e.g., given I = "Dst = COPY SrcReg", we'll transform that into
///
/// CopyReg (From class) = COPY SrcReg
/// SubRegCopy (To class) = COPY CopyReg:SubReg
/// Dst = COPY SubRegCopy
static bool selectSubregisterCopy(MachineInstr &I, MachineRegisterInfo &MRI,
                                  const RegisterBankInfo &RBI, unsigned SrcReg,
                                  const TargetRegisterClass *From,
                                  const TargetRegisterClass *To,
                                  unsigned SubReg) {
  MachineIRBuilder MIB(I);
  auto Copy = MIB.buildCopy({From}, {SrcReg});
  auto SubRegCopy = MIB.buildInstr(TargetOpcode::COPY, {To}, {})
                        .addReg(Copy.getReg(0), 0, SubReg);
  MachineOperand &RegOp = I.getOperand(1);
  RegOp.setReg(SubRegCopy.getReg(0));

  // It's possible that the destination register won't be constrained. Make
  // sure that happens.
  if (!TargetRegisterInfo::isPhysicalRegister(I.getOperand(0).getReg()))
    RBI.constrainGenericRegister(I.getOperand(0).getReg(), *To, MRI);

  return true;
}

/// Helper function to get the source and destination register classes for a
/// copy. Returns a std::pair containing the source register class for the
/// copy, and the destination register class for the copy. If a register class
/// cannot be determined, then it will be nullptr.
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getRegClassesForCopy(MachineInstr &I, const TargetInstrInfo &TII,
                     MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                     const RegisterBankInfo &RBI) {
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg = I.getOperand(1).getReg();
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
  unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);

  // Special casing for cross-bank copies of s1s. We can technically represent
  // a 1-bit value with any size of register. The minimum size for a GPR is 32
  // bits. So, we need to put the FPR on 32 bits as well.
  //
  // FIXME: I'm not sure if this case holds true outside of copies. If it does,
  // then we can pull it into the helpers that get the appropriate class for a
  // register bank. Or make a new helper that carries along some constraint
  // information.
  if (SrcRegBank != DstRegBank && (DstSize == 1 && SrcSize == 1))
    SrcSize = DstSize = 32;

  return {getMinClassForRegBank(SrcRegBank, SrcSize, true),
          getMinClassForRegBank(DstRegBank, DstSize, true)};
}

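/// Select a COPY (or copy-like) instruction \p I: pick register classes for
/// the source and destination from their register banks, insert any
/// subregister copy or SUBREG_TO_REG needed for cross-bank copies, and
/// constrain the destination register.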
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg = I.getOperand(1).getReg();
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  // Find the correct register classes for the source and destination registers.
  const TargetRegisterClass *SrcRC;
  const TargetRegisterClass *DstRC;
  std::tie(SrcRC, DstRC) = getRegClassesForCopy(I, TII, MRI, TRI, RBI);

  if (!DstRC) {
    LLVM_DEBUG(dbgs() << "Unexpected dest size "
                      << RBI.getSizeInBits(DstReg, MRI, TRI) << '\n');
    return false;
  }

  // A couple helpers below, for making sure that the copy we produce is valid.

  // Set to true if we insert a SUBREG_TO_REG. If we do this, then we don't want
  // to verify that the src and dst are the same size, since that's handled by
  // the SUBREG_TO_REG.
  bool KnownValid = false;

  // Returns true, or asserts if something we don't expect happens. Instead of
  // returning true, we return isValidCopy() to ensure that we verify the
  // result.
  auto CheckCopy = [&]() {
    // If we have a bitcast or something, we can't have physical registers.
    assert(
        (I.isCopy() ||
         (!TargetRegisterInfo::isPhysicalRegister(I.getOperand(0).getReg()) &&
          !TargetRegisterInfo::isPhysicalRegister(I.getOperand(1).getReg()))) &&
        "No phys reg on generic operator!");
    assert(KnownValid || isValidCopy(I, DstRegBank, MRI, TRI, RBI));
    (void)KnownValid;
    return true;
  };

  // Is this a copy? If so, then we may need to insert a subregister copy, or
  // a SUBREG_TO_REG.
  if (I.isCopy()) {
    // Yes. Check if there's anything to fix up.
    if (!SrcRC) {
      LLVM_DEBUG(dbgs() << "Couldn't determine source register class\n");
      return false;
    }

    // Is this a cross-bank copy?
    if (DstRegBank.getID() != SrcRegBank.getID()) {
      // If we're doing a cross-bank copy on different-sized registers, we need
      // to do a bit more work.
      unsigned SrcSize = TRI.getRegSizeInBits(*SrcRC);
      unsigned DstSize = TRI.getRegSizeInBits(*DstRC);

      if (SrcSize > DstSize) {
        // We're doing a cross-bank copy into a smaller register. We need a
        // subregister copy. First, get a register class that's on the same bank
        // as the destination, but the same size as the source.
        const TargetRegisterClass *SubregRC =
            getMinClassForRegBank(DstRegBank, SrcSize, true);
        assert(SubregRC && "Didn't get a register class for subreg?");

        // Get the appropriate subregister for the destination.
        unsigned SubReg = 0;
        if (!getSubRegForClass(DstRC, TRI, SubReg)) {
          LLVM_DEBUG(dbgs() << "Couldn't determine subregister for copy.\n");
          return false;
        }

        // Now, insert a subregister copy using the new register class.
        selectSubregisterCopy(I, MRI, RBI, SrcReg, SubregRC, DstRC, SubReg);
        return CheckCopy();
      }

      else if (DstRegBank.getID() == AArch64::GPRRegBankID && DstSize == 32 &&
               SrcSize == 16) {
        // Special case for FPR16 to GPR32.
        // FIXME: This can probably be generalized like the above case.
        unsigned PromoteReg =
            MRI.createVirtualRegister(&AArch64::FPR32RegClass);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(AArch64::SUBREG_TO_REG), PromoteReg)
            .addImm(0)
            .addUse(SrcReg)
            .addImm(AArch64::hsub);
        MachineOperand &RegOp = I.getOperand(1);
        RegOp.setReg(PromoteReg);

        // Promise that the copy is implicitly validated by the SUBREG_TO_REG.
        KnownValid = true;
      }
    }

    // If the destination is a physical register, then there's nothing to
    // change, so we're done.
    if (TargetRegisterInfo::isPhysicalRegister(DstReg))
      return CheckCopy();
  }

  // No need to constrain SrcReg. It will get constrained when we hit another
  // of its use or its defs. Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(AArch64::COPY));
  return CheckCopy();
}

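/// Select the AArch64 opcode for a scalar integer<->FP conversion
/// (G_SITOFP, G_UITOFP, G_FPTOSI, G_FPTOUI) with destination type \p DstTy and
/// source type \p SrcTy.
/// \returns \p GenericOpc if the combination is unsupported.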
static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy) {
  if (!DstTy.isScalar() || !SrcTy.isScalar())
    return GenericOpc;

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();

  switch (DstSize) {
  case 32:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  case 64:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  default:
    return GenericOpc;
  };
  return GenericOpc;
}

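/// Map an integer comparison predicate to the corresponding AArch64 condition
/// code.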
static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return AArch64CC::NE;
  case CmpInst::ICMP_EQ:
    return AArch64CC::EQ;
  case CmpInst::ICMP_SGT:
    return AArch64CC::GT;
  case CmpInst::ICMP_SGE:
    return AArch64CC::GE;
  case CmpInst::ICMP_SLT:
    return AArch64CC::LT;
  case CmpInst::ICMP_SLE:
    return AArch64CC::LE;
  case CmpInst::ICMP_UGT:
    return AArch64CC::HI;
  case CmpInst::ICMP_UGE:
    return AArch64CC::HS;
  case CmpInst::ICMP_ULT:
    return AArch64CC::LO;
  case CmpInst::ICMP_ULE:
    return AArch64CC::LS;
  }
}

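/// Map a floating-point comparison predicate to AArch64 condition codes.
/// Predicates that need two comparisons (FCMP_ONE, FCMP_UEQ) also set
/// \p CondCode2; otherwise it is left as AArch64CC::AL.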
static void changeFCMPPredToAArch64CC(CmpInst::Predicate P,
                                      AArch64CC::CondCode &CondCode,
                                      AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (P) {
  default:
    llvm_unreachable("Unknown FP condition!");
  case CmpInst::FCMP_OEQ:
    CondCode = AArch64CC::EQ;
    break;
  case CmpInst::FCMP_OGT:
    CondCode = AArch64CC::GT;
    break;
  case CmpInst::FCMP_OGE:
    CondCode = AArch64CC::GE;
    break;
  case CmpInst::FCMP_OLT:
    CondCode = AArch64CC::MI;
    break;
  case CmpInst::FCMP_OLE:
    CondCode = AArch64CC::LS;
    break;
  case CmpInst::FCMP_ONE:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GT;
    break;
  case CmpInst::FCMP_ORD:
    CondCode = AArch64CC::VC;
    break;
  case CmpInst::FCMP_UNO:
    CondCode = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UEQ:
    CondCode = AArch64CC::EQ;
    CondCode2 = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UGT:
    CondCode = AArch64CC::HI;
    break;
  case CmpInst::FCMP_UGE:
    CondCode = AArch64CC::PL;
    break;
  case CmpInst::FCMP_ULT:
    CondCode = AArch64CC::LT;
    break;
  case CmpInst::FCMP_ULE:
    CondCode = AArch64CC::LE;
    break;
  case CmpInst::FCMP_UNE:
    CondCode = AArch64CC::NE;
    break;
  }
}

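/// Try to select a G_BRCOND whose condition comes from a G_ICMP against zero
/// as a single CBZ/CBNZ (or CBZX/CBNZX) branch to the destination block.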
bool AArch64InstructionSelector::selectCompareBranch(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
  MachineInstr *CCMI = MRI.getVRegDef(CondReg);
  if (CCMI->getOpcode() == TargetOpcode::G_TRUNC)
    CCMI = MRI.getVRegDef(CCMI->getOperand(1).getReg());
  if (CCMI->getOpcode() != TargetOpcode::G_ICMP)
    return false;

  unsigned LHS = CCMI->getOperand(2).getReg();
  unsigned RHS = CCMI->getOperand(3).getReg();
  if (!getConstantVRegVal(RHS, MRI))
    std::swap(RHS, LHS);

  const auto RHSImm = getConstantVRegVal(RHS, MRI);
  if (!RHSImm || *RHSImm != 0)
    return false;

  const RegisterBank &RB = *RBI.getRegBank(LHS, MRI, TRI);
  if (RB.getID() != AArch64::GPRRegBankID)
    return false;

  const auto Pred = (CmpInst::Predicate)CCMI->getOperand(1).getPredicate();
  if (Pred != CmpInst::ICMP_NE && Pred != CmpInst::ICMP_EQ)
    return false;

  const unsigned CmpWidth = MRI.getType(LHS).getSizeInBits();
  unsigned CBOpc = 0;
  if (CmpWidth <= 32)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZW : AArch64::CBNZW);
  else if (CmpWidth == 64)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZX : AArch64::CBNZX);
  else
    return false;

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CBOpc))
      .addUse(LHS)
      .addMBB(DestMBB)
      .constrainAllUses(TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

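/// Select a vector G_SHL by emitting the USHL instruction for the supported
/// vector types (v2s32 and v4s32).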
bool AArch64InstructionSelector::selectVectorSHL(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert(I.getOpcode() == TargetOpcode::G_SHL);
  unsigned DstReg = I.getOperand(0).getReg();
  const LLT Ty = MRI.getType(DstReg);
  unsigned Src1Reg = I.getOperand(1).getReg();
  unsigned Src2Reg = I.getOperand(2).getReg();

  if (!Ty.isVector())
    return false;

  unsigned Opc = 0;
  if (Ty == LLT::vector(4, 32)) {
    Opc = AArch64::USHLv4i32;
  } else if (Ty == LLT::vector(2, 32)) {
    Opc = AArch64::USHLv2i32;
  } else {
    LLVM_DEBUG(dbgs() << "Unhandled G_SHL type");
    return false;
  }

  MachineIRBuilder MIB(I);
  auto UShl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg, Src2Reg});
  constrainSelectedInstRegOperands(*UShl, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

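/// Select a vector G_ASHR by negating the shift amount and emitting SSHL,
/// since there is no vector shift-right-by-register instruction.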
bool AArch64InstructionSelector::selectVectorASHR(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert(I.getOpcode() == TargetOpcode::G_ASHR);
  unsigned DstReg = I.getOperand(0).getReg();
  const LLT Ty = MRI.getType(DstReg);
  unsigned Src1Reg = I.getOperand(1).getReg();
  unsigned Src2Reg = I.getOperand(2).getReg();

  if (!Ty.isVector())
    return false;

  // There is not a shift right register instruction, but the shift left
  // register instruction takes a signed value, where negative numbers specify a
  // right shift.

  unsigned Opc = 0;
  unsigned NegOpc = 0;
  const TargetRegisterClass *RC = nullptr;
  if (Ty == LLT::vector(4, 32)) {
    Opc = AArch64::SSHLv4i32;
    NegOpc = AArch64::NEGv4i32;
    RC = &AArch64::FPR128RegClass;
  } else if (Ty == LLT::vector(2, 32)) {
    Opc = AArch64::SSHLv2i32;
    NegOpc = AArch64::NEGv2i32;
    RC = &AArch64::FPR64RegClass;
  } else {
    LLVM_DEBUG(dbgs() << "Unhandled G_ASHR type");
    return false;
  }

  MachineIRBuilder MIB(I);
  auto Neg = MIB.buildInstr(NegOpc, {RC}, {Src2Reg});
  constrainSelectedInstRegOperands(*Neg, TII, TRI, RBI);
  auto SShl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg, Neg});
  constrainSelectedInstRegOperands(*SShl, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::selectVaStartAAPCS(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  return false;
}

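/// Select va_start on Darwin: materialize the address of the variadic
/// argument stack area and store it through the va_list pointer operand.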
bool AArch64InstructionSelector::selectVaStartDarwin(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  unsigned ListReg = I.getOperand(0).getReg();

  unsigned ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);

  auto MIB =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
          .addDef(ArgsAddrReg)
          .addFrameIndex(FuncInfo->getVarArgsStackIndex())
          .addImm(0)
          .addImm(0);

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);

  MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui))
            .addUse(ArgsAddrReg)
            .addUse(ListReg)
            .addImm(0)
            .addMemOperand(*I.memoperands_begin());

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

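// Materialize a GlobalValue or BlockAddress into the destination register as
// a MOVZ of the low 16 bits followed by MOVKs for the remaining 16-bit chunks.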
void AArch64InstructionSelector::materializeLargeCMVal(
    MachineInstr &I, const Value *V, unsigned char OpFlags) const {
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineIRBuilder MIB(I);

  auto MovZ = MIB.buildInstr(AArch64::MOVZXi, {&AArch64::GPR64RegClass}, {});
  MovZ->addOperand(MF, I.getOperand(1));
  MovZ->getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_G0 |
                                     AArch64II::MO_NC);
  MovZ->addOperand(MF, MachineOperand::CreateImm(0));
  constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);

  auto BuildMovK = [&](unsigned SrcReg, unsigned char Flags, unsigned Offset,
                       unsigned ForceDstReg) {
    unsigned DstReg = ForceDstReg
                          ? ForceDstReg
                          : MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    auto MovI = MIB.buildInstr(AArch64::MOVKXi).addDef(DstReg).addUse(SrcReg);
    if (auto *GV = dyn_cast<GlobalValue>(V)) {
      MovI->addOperand(MF, MachineOperand::CreateGA(
                               GV, MovZ->getOperand(1).getOffset(), Flags));
    } else {
      MovI->addOperand(
          MF, MachineOperand::CreateBA(cast<BlockAddress>(V),
                                       MovZ->getOperand(1).getOffset(), Flags));
    }
    MovI->addOperand(MF, MachineOperand::CreateImm(Offset));
    constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI);
    return DstReg;
  };
  unsigned DstReg = BuildMovK(MovZ.getReg(0),
                              AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0);
  DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0);
  BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg());
  return;
}

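/// The main instruction-selection entry point. Handles PHIs, copies and other
/// non-generic instructions first, then tries the tblgen-erated selector
/// (selectImpl), and finally falls back to the hand-written cases in the
/// switch below.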
Daniel Sandersf76f3152017-11-16 00:46:35 +00001010bool AArch64InstructionSelector::select(MachineInstr &I,
1011 CodeGenCoverage &CoverageInfo) const {
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001012 assert(I.getParent() && "Instruction should be in a basic block!");
1013 assert(I.getParent()->getParent() && "Instruction should be in a function!");
1014
1015 MachineBasicBlock &MBB = *I.getParent();
1016 MachineFunction &MF = *MBB.getParent();
1017 MachineRegisterInfo &MRI = MF.getRegInfo();
1018
Tim Northovercdf23f12016-10-31 18:30:59 +00001019 unsigned Opcode = I.getOpcode();
Aditya Nandakumarefd8a842017-08-23 20:45:48 +00001020 // G_PHI requires same handling as PHI
1021 if (!isPreISelGenericOpcode(Opcode) || Opcode == TargetOpcode::G_PHI) {
Tim Northovercdf23f12016-10-31 18:30:59 +00001022 // Certain non-generic instructions also need some special handling.
1023
1024 if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
1025 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
Tim Northover7d88da62016-11-08 00:34:06 +00001026
Aditya Nandakumarefd8a842017-08-23 20:45:48 +00001027 if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) {
Tim Northover7d88da62016-11-08 00:34:06 +00001028 const unsigned DefReg = I.getOperand(0).getReg();
1029 const LLT DefTy = MRI.getType(DefReg);
1030
1031 const TargetRegisterClass *DefRC = nullptr;
1032 if (TargetRegisterInfo::isPhysicalRegister(DefReg)) {
1033 DefRC = TRI.getRegClass(DefReg);
1034 } else {
1035 const RegClassOrRegBank &RegClassOrBank =
1036 MRI.getRegClassOrRegBank(DefReg);
1037
1038 DefRC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
1039 if (!DefRC) {
1040 if (!DefTy.isValid()) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001041 LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
Tim Northover7d88da62016-11-08 00:34:06 +00001042 return false;
1043 }
1044 const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
1045 DefRC = getRegClassForTypeOnBank(DefTy, RB, RBI);
1046 if (!DefRC) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001047 LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
Tim Northover7d88da62016-11-08 00:34:06 +00001048 return false;
1049 }
1050 }
1051 }
Aditya Nandakumarefd8a842017-08-23 20:45:48 +00001052 I.setDesc(TII.get(TargetOpcode::PHI));
Tim Northover7d88da62016-11-08 00:34:06 +00001053
1054 return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
1055 }
1056
1057 if (I.isCopy())
Tim Northovercdf23f12016-10-31 18:30:59 +00001058 return selectCopy(I, TII, MRI, TRI, RBI);
Tim Northover7d88da62016-11-08 00:34:06 +00001059
1060 return true;
Tim Northovercdf23f12016-10-31 18:30:59 +00001061 }
1062
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001063
1064 if (I.getNumOperands() != I.getNumExplicitOperands()) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001065 LLVM_DEBUG(
1066 dbgs() << "Generic instruction has unexpected implicit operands\n");
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001067 return false;
1068 }
1069
Daniel Sandersf76f3152017-11-16 00:46:35 +00001070 if (selectImpl(I, CoverageInfo))
Ahmed Bougacha36f70352016-12-21 23:26:20 +00001071 return true;
1072
Tim Northover32a078a2016-09-15 10:09:59 +00001073 LLT Ty =
1074 I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{};
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001075
Amara Emerson3739a202019-03-15 21:59:50 +00001076 MachineIRBuilder MIB(I);
1077
Tim Northover69271c62016-10-12 22:49:11 +00001078 switch (Opcode) {
Tim Northover5e3dbf32016-10-12 22:49:01 +00001079 case TargetOpcode::G_BRCOND: {
1080 if (Ty.getSizeInBits() > 32) {
1081 // We shouldn't need this on AArch64, but it would be implemented as an
1082 // EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the
1083 // bit being tested is < 32.
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001084 LLVM_DEBUG(dbgs() << "G_BRCOND has type: " << Ty
1085 << ", expected at most 32-bits");
Tim Northover5e3dbf32016-10-12 22:49:01 +00001086 return false;
1087 }
1088
1089 const unsigned CondReg = I.getOperand(0).getReg();
1090 MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
1091
Kristof Beylse66bc1f2018-12-18 08:50:02 +00001092 // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z
1093 // instructions will not be produced, as they are conditional branch
1094 // instructions that do not set flags.
1095 bool ProduceNonFlagSettingCondBr =
1096 !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);
1097 if (ProduceNonFlagSettingCondBr && selectCompareBranch(I, MF, MRI))
Ahmed Bougacha641cb202017-03-27 16:35:31 +00001098 return true;
1099
Kristof Beylse66bc1f2018-12-18 08:50:02 +00001100 if (ProduceNonFlagSettingCondBr) {
1101 auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
1102 .addUse(CondReg)
1103 .addImm(/*bit offset=*/0)
1104 .addMBB(DestMBB);
Tim Northover5e3dbf32016-10-12 22:49:01 +00001105
Kristof Beylse66bc1f2018-12-18 08:50:02 +00001106 I.eraseFromParent();
1107 return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
1108 } else {
1109 auto CMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
1110 .addDef(AArch64::WZR)
1111 .addUse(CondReg)
1112 .addImm(1);
1113 constrainSelectedInstRegOperands(*CMP.getInstr(), TII, TRI, RBI);
1114 auto Bcc =
1115 BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::Bcc))
1116 .addImm(AArch64CC::EQ)
1117 .addMBB(DestMBB);
1118
1119 I.eraseFromParent();
1120 return constrainSelectedInstRegOperands(*Bcc.getInstr(), TII, TRI, RBI);
1121 }
Tim Northover5e3dbf32016-10-12 22:49:01 +00001122 }
1123
Kristof Beyls65a12c02017-01-30 09:13:18 +00001124 case TargetOpcode::G_BRINDIRECT: {
1125 I.setDesc(TII.get(AArch64::BR));
1126 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1127 }
1128
Jessica Paquette67ab9eb2019-04-26 18:00:01 +00001129 case TargetOpcode::G_BSWAP: {
1130 // Handle vector types for G_BSWAP directly.
1131 unsigned DstReg = I.getOperand(0).getReg();
1132 LLT DstTy = MRI.getType(DstReg);
1133
1134 // We should only get vector types here; everything else is handled by the
1135 // importer right now.
1136 if (!DstTy.isVector() || DstTy.getSizeInBits() > 128) {
1137 LLVM_DEBUG(dbgs() << "Dst type for G_BSWAP currently unsupported.\n");
1138 return false;
1139 }
1140
1141 // Only handle 4 and 2 element vectors for now.
1142 // TODO: 16-bit elements.
1143 unsigned NumElts = DstTy.getNumElements();
1144 if (NumElts != 4 && NumElts != 2) {
1145 LLVM_DEBUG(dbgs() << "Unsupported number of elements for G_BSWAP.\n");
1146 return false;
1147 }
1148
1149 // Choose the correct opcode for the supported types. Right now, that's
1150 // v2s32, v4s32, and v2s64.
1151 unsigned Opc = 0;
1152 unsigned EltSize = DstTy.getElementType().getSizeInBits();
1153 if (EltSize == 32)
1154 Opc = (DstTy.getNumElements() == 2) ? AArch64::REV32v8i8
1155 : AArch64::REV32v16i8;
1156 else if (EltSize == 64)
1157 Opc = AArch64::REV64v16i8;
1158
1159 // We should always get something by the time we get here...
1160 assert(Opc != 0 && "Didn't get an opcode for G_BSWAP?");
1161
1162 I.setDesc(TII.get(Opc));
1163 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1164 }
1165
Tim Northover4494d692016-10-18 19:47:57 +00001166 case TargetOpcode::G_FCONSTANT:
Tim Northover4edc60d2016-10-10 21:49:42 +00001167 case TargetOpcode::G_CONSTANT: {
Tim Northover4494d692016-10-18 19:47:57 +00001168 const bool isFP = Opcode == TargetOpcode::G_FCONSTANT;
1169
1170 const LLT s32 = LLT::scalar(32);
1171 const LLT s64 = LLT::scalar(64);
1172 const LLT p0 = LLT::pointer(0, 64);
1173
1174 const unsigned DefReg = I.getOperand(0).getReg();
1175 const LLT DefTy = MRI.getType(DefReg);
1176 const unsigned DefSize = DefTy.getSizeInBits();
1177 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
1178
1179 // FIXME: Redundant check, but even less readable when factored out.
1180 if (isFP) {
1181 if (Ty != s32 && Ty != s64) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001182 LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
1183 << " constant, expected: " << s32 << " or " << s64
1184 << '\n');
Tim Northover4494d692016-10-18 19:47:57 +00001185 return false;
1186 }
1187
1188 if (RB.getID() != AArch64::FPRRegBankID) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001189 LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
1190 << " constant on bank: " << RB
1191 << ", expected: FPR\n");
Tim Northover4494d692016-10-18 19:47:57 +00001192 return false;
1193 }
Daniel Sanders11300ce2017-10-13 21:28:03 +00001194
1195 // The case when we have 0.0 is covered by tablegen. Reject it here so we
1196 // can be sure tablegen works correctly and isn't rescued by this code.
1197 if (I.getOperand(1).getFPImm()->getValueAPF().isExactlyValue(0.0))
1198 return false;
Tim Northover4494d692016-10-18 19:47:57 +00001199 } else {
Daniel Sanders05540042017-08-08 10:44:31 +00001200 // s32 and s64 are covered by tablegen.
1201 if (Ty != p0) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001202 LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
1203 << " constant, expected: " << s32 << ", " << s64
1204 << ", or " << p0 << '\n');
Tim Northover4494d692016-10-18 19:47:57 +00001205 return false;
1206 }
1207
1208 if (RB.getID() != AArch64::GPRRegBankID) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001209 LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
1210 << " constant on bank: " << RB
1211 << ", expected: GPR\n");
Tim Northover4494d692016-10-18 19:47:57 +00001212 return false;
1213 }
1214 }
1215
1216 const unsigned MovOpc =
1217 DefSize == 32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;
1218
Tim Northover4494d692016-10-18 19:47:57 +00001219 if (isFP) {
Jessica Paquettea3843fe2019-05-01 22:39:43 +00001220 // Either emit a FMOV, or emit a copy to emit a normal mov.
Tim Northover4494d692016-10-18 19:47:57 +00001221 const TargetRegisterClass &GPRRC =
1222 DefSize == 32 ? AArch64::GPR32RegClass : AArch64::GPR64RegClass;
1223 const TargetRegisterClass &FPRRC =
1224 DefSize == 32 ? AArch64::FPR32RegClass : AArch64::FPR64RegClass;
1225
Jessica Paquettea3843fe2019-05-01 22:39:43 +00001226 // Can we use a FMOV instruction to represent the immediate?
1227 if (emitFMovForFConstant(I, MRI))
1228 return true;
1229
1230 // Nope. Emit a copy and use a normal mov instead.
Tim Northover4494d692016-10-18 19:47:57 +00001231 const unsigned DefGPRReg = MRI.createVirtualRegister(&GPRRC);
1232 MachineOperand &RegOp = I.getOperand(0);
1233 RegOp.setReg(DefGPRReg);
Amara Emerson3739a202019-03-15 21:59:50 +00001234 MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
1235 MIB.buildCopy({DefReg}, {DefGPRReg});
Tim Northover4494d692016-10-18 19:47:57 +00001236
1237 if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001238 LLVM_DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
Tim Northover4494d692016-10-18 19:47:57 +00001239 return false;
1240 }
1241
1242 MachineOperand &ImmOp = I.getOperand(1);
1243 // FIXME: Is going through int64_t always correct?
1244 ImmOp.ChangeToImmediate(
1245 ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
Daniel Sanders066ebbf2017-02-24 15:43:30 +00001246 } else if (I.getOperand(1).isCImm()) {
Tim Northover9267ac52016-12-05 21:47:07 +00001247 uint64_t Val = I.getOperand(1).getCImm()->getZExtValue();
1248 I.getOperand(1).ChangeToImmediate(Val);
Daniel Sanders066ebbf2017-02-24 15:43:30 +00001249 } else if (I.getOperand(1).isImm()) {
1250 uint64_t Val = I.getOperand(1).getImm();
1251 I.getOperand(1).ChangeToImmediate(Val);
Tim Northover4494d692016-10-18 19:47:57 +00001252 }
1253
Jessica Paquettea3843fe2019-05-01 22:39:43 +00001254 I.setDesc(TII.get(MovOpc));
Tim Northover4494d692016-10-18 19:47:57 +00001255 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1256 return true;
Tim Northover4edc60d2016-10-10 21:49:42 +00001257 }
Tim Northover7b6d66c2017-07-20 22:58:38 +00001258 case TargetOpcode::G_EXTRACT: {
1259 LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
Amara Emersonbc03bae2018-02-18 17:03:02 +00001260 LLT DstTy = MRI.getType(I.getOperand(0).getReg());
Amara Emerson242efdb2018-02-18 17:28:34 +00001261 (void)DstTy;
Amara Emersonbc03bae2018-02-18 17:03:02 +00001262 unsigned SrcSize = SrcTy.getSizeInBits();
Tim Northover7b6d66c2017-07-20 22:58:38 +00001263 // Larger extracts are vectors, same-size extracts should be something else
1264 // by now (either split up or simplified to a COPY).
1265 if (SrcTy.getSizeInBits() > 64 || Ty.getSizeInBits() > 32)
1266 return false;
1267
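      // Lower the G_EXTRACT as an unsigned bitfield extract: UBFM uses the bit
      // offset (already operand 2) as immr, and we append
      // imms = offset + extracted width - 1 below.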
Amara Emersonbc03bae2018-02-18 17:03:02 +00001268 I.setDesc(TII.get(SrcSize == 64 ? AArch64::UBFMXri : AArch64::UBFMWri));
Tim Northover7b6d66c2017-07-20 22:58:38 +00001269 MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() +
1270 Ty.getSizeInBits() - 1);
1271
Amara Emersonbc03bae2018-02-18 17:03:02 +00001272 if (SrcSize < 64) {
1273 assert(SrcSize == 32 && DstTy.getSizeInBits() == 16 &&
1274 "unexpected G_EXTRACT types");
1275 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1276 }
1277
Tim Northover7b6d66c2017-07-20 22:58:38 +00001278 unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
Amara Emerson3739a202019-03-15 21:59:50 +00001279 MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
Amara Emerson86271782019-03-18 19:20:10 +00001280 MIB.buildInstr(TargetOpcode::COPY, {I.getOperand(0).getReg()}, {})
1281 .addReg(DstReg, 0, AArch64::sub_32);
Tim Northover7b6d66c2017-07-20 22:58:38 +00001282 RBI.constrainGenericRegister(I.getOperand(0).getReg(),
1283 AArch64::GPR32RegClass, MRI);
1284 I.getOperand(0).setReg(DstReg);
1285
1286 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1287 }
1288
1289 case TargetOpcode::G_INSERT: {
1290 LLT SrcTy = MRI.getType(I.getOperand(2).getReg());
Amara Emersonbc03bae2018-02-18 17:03:02 +00001291 LLT DstTy = MRI.getType(I.getOperand(0).getReg());
1292 unsigned DstSize = DstTy.getSizeInBits();
Tim Northover7b6d66c2017-07-20 22:58:38 +00001293 // Larger inserts are vectors, same-size ones should be something else by
1294 // now (split up or turned into COPYs).
1295 if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32)
1296 return false;
1297
Amara Emersonbc03bae2018-02-18 17:03:02 +00001298 I.setDesc(TII.get(DstSize == 64 ? AArch64::BFMXri : AArch64::BFMWri));
Tim Northover7b6d66c2017-07-20 22:58:38 +00001299 unsigned LSB = I.getOperand(3).getImm();
1300 unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
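    // BFM encodes the insert position as a rotate: immr = (DstSize - LSB) %
    // DstSize, with imms = Width - 1, matching the BFI alias.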
Amara Emersonbc03bae2018-02-18 17:03:02 +00001301 I.getOperand(3).setImm((DstSize - LSB) % DstSize);
Tim Northover7b6d66c2017-07-20 22:58:38 +00001302 MachineInstrBuilder(MF, I).addImm(Width - 1);
1303
Amara Emersonbc03bae2018-02-18 17:03:02 +00001304 if (DstSize < 64) {
1305 assert(DstSize == 32 && SrcTy.getSizeInBits() == 16 &&
1306 "unexpected G_INSERT types");
1307 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1308 }
1309
Tim Northover7b6d66c2017-07-20 22:58:38 +00001310 unsigned SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
1311 BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
1312 TII.get(AArch64::SUBREG_TO_REG))
1313 .addDef(SrcReg)
1314 .addImm(0)
1315 .addUse(I.getOperand(2).getReg())
1316 .addImm(AArch64::sub_32);
1317 RBI.constrainGenericRegister(I.getOperand(2).getReg(),
1318 AArch64::GPR32RegClass, MRI);
1319 I.getOperand(2).setReg(SrcReg);
1320
1321 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1322 }
Ahmed Bougacha0306b5e2016-08-16 14:02:42 +00001323 case TargetOpcode::G_FRAME_INDEX: {
1324 // allocas and G_FRAME_INDEX are only supported in addrspace(0).
Tim Northover5ae83502016-09-15 09:20:34 +00001325 if (Ty != LLT::pointer(0, 64)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001326 LLVM_DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
1327 << ", expected: " << LLT::pointer(0, 64) << '\n');
Ahmed Bougacha0306b5e2016-08-16 14:02:42 +00001328 return false;
1329 }
Ahmed Bougacha0306b5e2016-08-16 14:02:42 +00001330 I.setDesc(TII.get(AArch64::ADDXri));
Ahmed Bougacha0306b5e2016-08-16 14:02:42 +00001331
1332 // MOs for a #0 shifted immediate.
1333 I.addOperand(MachineOperand::CreateImm(0));
1334 I.addOperand(MachineOperand::CreateImm(0));
1335
1336 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1337 }
Tim Northoverbdf16242016-10-10 21:50:00 +00001338
1339 case TargetOpcode::G_GLOBAL_VALUE: {
1340 auto GV = I.getOperand(1).getGlobal();
1341 if (GV->isThreadLocal()) {
1342 // FIXME: we don't support TLS yet.
1343 return false;
1344 }
1345 unsigned char OpFlags = STI.ClassifyGlobalReference(GV, TM);
Tim Northoverfe7c59a2016-12-13 18:25:38 +00001346 if (OpFlags & AArch64II::MO_GOT) {
Tim Northoverbdf16242016-10-10 21:50:00 +00001347 I.setDesc(TII.get(AArch64::LOADgot));
Tim Northoverfe7c59a2016-12-13 18:25:38 +00001348 I.getOperand(1).setTargetFlags(OpFlags);
Amara Emersond5785772018-01-18 19:21:27 +00001349 } else if (TM.getCodeModel() == CodeModel::Large) {
1350 // Materialize the global using movz/movk instructions.
Amara Emerson1e8c1642018-07-31 00:09:02 +00001351 materializeLargeCMVal(I, GV, OpFlags);
Amara Emersond5785772018-01-18 19:21:27 +00001352 I.eraseFromParent();
1353 return true;
David Green9dd1d452018-08-22 11:31:39 +00001354 } else if (TM.getCodeModel() == CodeModel::Tiny) {
1355 I.setDesc(TII.get(AArch64::ADR));
1356 I.getOperand(1).setTargetFlags(OpFlags);
Tim Northoverfe7c59a2016-12-13 18:25:38 +00001357 } else {
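      // Otherwise (small code model) use the MOVaddr pseudo, which expands to
      // an ADRP of the symbol's page plus an ADD of the low 12 bits.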
Tim Northoverbdf16242016-10-10 21:50:00 +00001358 I.setDesc(TII.get(AArch64::MOVaddr));
1359 I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
1360 MachineInstrBuilder MIB(MF, I);
1361 MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(),
1362 OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
1363 }
1364 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1365 }
1366
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001367 case TargetOpcode::G_LOAD:
1368 case TargetOpcode::G_STORE: {
Tim Northover0f140c72016-09-09 11:46:34 +00001369 LLT PtrTy = MRI.getType(I.getOperand(1).getReg());
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001370
Tim Northover5ae83502016-09-15 09:20:34 +00001371 if (PtrTy != LLT::pointer(0, 64)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001372 LLVM_DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
1373 << ", expected: " << LLT::pointer(0, 64) << '\n');
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001374 return false;
1375 }
1376
Daniel Sanders3c1c4c02017-12-05 05:52:07 +00001377 auto &MemOp = **I.memoperands_begin();
1378 if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001379 LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
Daniel Sanders3c1c4c02017-12-05 05:52:07 +00001380 return false;
1381 }
Daniel Sandersf84bc372018-05-05 20:53:24 +00001382 unsigned MemSizeInBits = MemOp.getSize() * 8;
Daniel Sanders3c1c4c02017-12-05 05:52:07 +00001383
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001384 const unsigned PtrReg = I.getOperand(1).getReg();
Ahmed Bougachaf0b22c42017-03-27 18:14:20 +00001385#ifndef NDEBUG
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001386 const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
Ahmed Bougachaf0b22c42017-03-27 18:14:20 +00001387 // Sanity-check the pointer register.
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001388 assert(PtrRB.getID() == AArch64::GPRRegBankID &&
1389 "Load/Store pointer operand isn't a GPR");
Tim Northover0f140c72016-09-09 11:46:34 +00001390 assert(MRI.getType(PtrReg).isPointer() &&
1391 "Load/Store pointer operand isn't a pointer");
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001392#endif
1393
1394 const unsigned ValReg = I.getOperand(0).getReg();
1395 const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);
1396
1397 const unsigned NewOpc =
Daniel Sandersf84bc372018-05-05 20:53:24 +00001398 selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemSizeInBits);
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001399 if (NewOpc == I.getOpcode())
1400 return false;
1401
1402 I.setDesc(TII.get(NewOpc));
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001403
Ahmed Bougacha8a654082017-03-27 17:31:52 +00001404 uint64_t Offset = 0;
1405 auto *PtrMI = MRI.getVRegDef(PtrReg);
1406
1407 // Try to fold a GEP into our unsigned immediate addressing mode.
1408 if (PtrMI->getOpcode() == TargetOpcode::G_GEP) {
1409 if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) {
1410 int64_t Imm = *COff;
Daniel Sandersf84bc372018-05-05 20:53:24 +00001411 const unsigned Size = MemSizeInBits / 8;
Ahmed Bougacha8a654082017-03-27 17:31:52 +00001412 const unsigned Scale = Log2_32(Size);
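        // The unsigned-offset addressing mode takes a 12-bit immediate scaled
        // by the access size, so the offset must be size-aligned and fall in
        // [0, 4096 * Size).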
1413 if ((Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)) {
1414 unsigned Ptr2Reg = PtrMI->getOperand(1).getReg();
1415 I.getOperand(1).setReg(Ptr2Reg);
1416 PtrMI = MRI.getVRegDef(Ptr2Reg);
1417 Offset = Imm / Size;
1418 }
1419 }
1420 }
1421
Ahmed Bougachaf75782f2017-03-27 17:31:56 +00001422 // If we haven't folded anything into our addressing mode yet, try to fold
1423 // a frame index into the base+offset.
1424 if (!Offset && PtrMI->getOpcode() == TargetOpcode::G_FRAME_INDEX)
1425 I.getOperand(1).ChangeToFrameIndex(PtrMI->getOperand(1).getIndex());
1426
Ahmed Bougacha8a654082017-03-27 17:31:52 +00001427 I.addOperand(MachineOperand::CreateImm(Offset));
Ahmed Bougacha85a66a62017-03-27 17:31:48 +00001428
1429 // If we're storing a 0, use WZR/XZR.
1430 if (auto CVal = getConstantVRegVal(ValReg, MRI)) {
1431 if (*CVal == 0 && Opcode == TargetOpcode::G_STORE) {
1432 if (I.getOpcode() == AArch64::STRWui)
1433 I.getOperand(0).setReg(AArch64::WZR);
1434 else if (I.getOpcode() == AArch64::STRXui)
1435 I.getOperand(0).setReg(AArch64::XZR);
1436 }
1437 }
1438
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001439 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1440 }
1441
Tim Northover9dd78f82017-02-08 21:22:25 +00001442 case TargetOpcode::G_SMULH:
1443 case TargetOpcode::G_UMULH: {
1444 // Reject the various things we don't support yet.
1445 if (unsupportedBinOp(I, RBI, MRI, TRI))
1446 return false;
1447
1448 const unsigned DefReg = I.getOperand(0).getReg();
1449 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
1450
1451 if (RB.getID() != AArch64::GPRRegBankID) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001452 LLVM_DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
Tim Northover9dd78f82017-02-08 21:22:25 +00001453 return false;
1454 }
1455
1456 if (Ty != LLT::scalar(64)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001457 LLVM_DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
1458 << ", expected: " << LLT::scalar(64) << '\n');
Tim Northover9dd78f82017-02-08 21:22:25 +00001459 return false;
1460 }
1461
1462 unsigned NewOpc = I.getOpcode() == TargetOpcode::G_SMULH ? AArch64::SMULHrr
1463 : AArch64::UMULHrr;
1464 I.setDesc(TII.get(NewOpc));
1465
1466 // Now that we selected an opcode, we need to constrain the register
1467 // operands to use appropriate classes.
1468 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1469 }
Ahmed Bougacha33e19fe2016-08-18 16:05:11 +00001470 case TargetOpcode::G_FADD:
1471 case TargetOpcode::G_FSUB:
1472 case TargetOpcode::G_FMUL:
1473 case TargetOpcode::G_FDIV:
1474
Ahmed Bougacha2ac5bf92016-08-16 14:02:47 +00001475 case TargetOpcode::G_ASHR:
Amara Emerson9bf092d2019-04-09 21:22:43 +00001476 if (MRI.getType(I.getOperand(0).getReg()).isVector())
1477 return selectVectorASHR(I, MRI);
1478 LLVM_FALLTHROUGH;
1479 case TargetOpcode::G_SHL:
1480 if (Opcode == TargetOpcode::G_SHL &&
1481 MRI.getType(I.getOperand(0).getReg()).isVector())
1482 return selectVectorSHL(I, MRI);
1483 LLVM_FALLTHROUGH;
1484 case TargetOpcode::G_OR:
1485 case TargetOpcode::G_LSHR:
Tim Northover2fda4b02016-10-10 21:49:49 +00001486 case TargetOpcode::G_GEP: {
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001487 // Reject the various things we don't support yet.
Ahmed Bougacha59e160a2016-08-16 14:37:40 +00001488 if (unsupportedBinOp(I, RBI, MRI, TRI))
1489 return false;
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001490
Ahmed Bougacha59e160a2016-08-16 14:37:40 +00001491 const unsigned OpSize = Ty.getSizeInBits();
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001492
1493 const unsigned DefReg = I.getOperand(0).getReg();
1494 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
1495
1496 const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
1497 if (NewOpc == I.getOpcode())
1498 return false;
1499
1500 I.setDesc(TII.get(NewOpc));
1501 // FIXME: Should the type be always reset in setDesc?
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001502
1503 // Now that we selected an opcode, we need to constrain the register
1504 // operands to use appropriate classes.
1505 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1506 }
Tim Northover3d38b3a2016-10-11 20:50:21 +00001507
Jessica Paquette7d6784f2019-03-14 22:54:29 +00001508 case TargetOpcode::G_UADDO: {
1509 // TODO: Support other types.
1510 unsigned OpSize = Ty.getSizeInBits();
1511 if (OpSize != 32 && OpSize != 64) {
1512 LLVM_DEBUG(
1513 dbgs()
1514 << "G_UADDO currently only supported for 32 and 64 b types.\n");
1515 return false;
1516 }
1517
1518 // TODO: Support vectors.
1519 if (Ty.isVector()) {
1520 LLVM_DEBUG(dbgs() << "G_UADDO currently only supported for scalars.\n");
1521 return false;
1522 }
1523
1524 // Add and set the set condition flag.
1525 unsigned AddsOpc = OpSize == 32 ? AArch64::ADDSWrr : AArch64::ADDSXrr;
1526 MachineIRBuilder MIRBuilder(I);
1527 auto AddsMI = MIRBuilder.buildInstr(
1528 AddsOpc, {I.getOperand(0).getReg()},
1529 {I.getOperand(2).getReg(), I.getOperand(3).getReg()});
1530 constrainSelectedInstRegOperands(*AddsMI, TII, TRI, RBI);
1531
1532 // Now, put the overflow result in the register given by the first operand
1533 // to the G_UADDO. CSINC increments the result when the predicate is false,
1534 // so to get the increment when it's true, we need to use the inverse. In
1535 // this case, we want to increment when carry is set.
1536 auto CsetMI = MIRBuilder
1537 .buildInstr(AArch64::CSINCWr, {I.getOperand(1).getReg()},
1538 {AArch64::WZR, AArch64::WZR})
1539 .addImm(getInvertedCondCode(AArch64CC::HS));
1540 constrainSelectedInstRegOperands(*CsetMI, TII, TRI, RBI);
1541 I.eraseFromParent();
1542 return true;
1543 }
1544
Tim Northover398c5f52017-02-14 20:56:29 +00001545 case TargetOpcode::G_PTR_MASK: {
1546 uint64_t Align = I.getOperand(2).getImm();
1547 if (Align >= 64 || Align == 0)
1548 return false;
1549
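    // Clear the low Align bits of the pointer by ANDing with the complement
    // mask, encoded as a logical immediate.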
1550 uint64_t Mask = ~((1ULL << Align) - 1);
1551 I.setDesc(TII.get(AArch64::ANDXri));
1552 I.getOperand(2).setImm(AArch64_AM::encodeLogicalImmediate(Mask, 64));
1553
1554 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1555 }
Tim Northover037af52c2016-10-31 18:31:09 +00001556 case TargetOpcode::G_PTRTOINT:
Tim Northoverfb8d9892016-10-12 22:49:15 +00001557 case TargetOpcode::G_TRUNC: {
1558 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
1559 const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
1560
1561 const unsigned DstReg = I.getOperand(0).getReg();
1562 const unsigned SrcReg = I.getOperand(1).getReg();
1563
1564 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
1565 const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
1566
1567 if (DstRB.getID() != SrcRB.getID()) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001568 LLVM_DEBUG(
1569 dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
Tim Northoverfb8d9892016-10-12 22:49:15 +00001570 return false;
1571 }
1572
1573 if (DstRB.getID() == AArch64::GPRRegBankID) {
1574 const TargetRegisterClass *DstRC =
1575 getRegClassForTypeOnBank(DstTy, DstRB, RBI);
1576 if (!DstRC)
1577 return false;
1578
1579 const TargetRegisterClass *SrcRC =
1580 getRegClassForTypeOnBank(SrcTy, SrcRB, RBI);
1581 if (!SrcRC)
1582 return false;
1583
1584 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
1585 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001586 LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
Tim Northoverfb8d9892016-10-12 22:49:15 +00001587 return false;
1588 }
1589
1590 if (DstRC == SrcRC) {
1591 // Nothing to be done
Daniel Sanderscc36dbf2017-06-27 10:11:39 +00001592 } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) &&
1593 SrcTy == LLT::scalar(64)) {
1594 llvm_unreachable("TableGen can import this case");
1595 return false;
Tim Northoverfb8d9892016-10-12 22:49:15 +00001596 } else if (DstRC == &AArch64::GPR32RegClass &&
1597 SrcRC == &AArch64::GPR64RegClass) {
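      // Truncating a 64-bit GPR to 32 bits is just a read of its sub_32
      // subregister; tag the source operand and fall through to the COPY
      // below.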
1598 I.getOperand(1).setSubReg(AArch64::sub_32);
1599 } else {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001600 LLVM_DEBUG(
1601 dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
Tim Northoverfb8d9892016-10-12 22:49:15 +00001602 return false;
1603 }
1604
1605 I.setDesc(TII.get(TargetOpcode::COPY));
1606 return true;
1607 } else if (DstRB.getID() == AArch64::FPRRegBankID) {
1608 if (DstTy == LLT::vector(4, 16) && SrcTy == LLT::vector(4, 32)) {
1609 I.setDesc(TII.get(AArch64::XTNv4i16));
1610 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1611 return true;
1612 }
1613 }
1614
1615 return false;
1616 }
1617
Tim Northover3d38b3a2016-10-11 20:50:21 +00001618 case TargetOpcode::G_ANYEXT: {
1619 const unsigned DstReg = I.getOperand(0).getReg();
1620 const unsigned SrcReg = I.getOperand(1).getReg();
1621
Quentin Colombetcb629a82016-10-12 03:57:49 +00001622 const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
1623 if (RBDst.getID() != AArch64::GPRRegBankID) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001624 LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst
1625 << ", expected: GPR\n");
Quentin Colombetcb629a82016-10-12 03:57:49 +00001626 return false;
1627 }
Tim Northover3d38b3a2016-10-11 20:50:21 +00001628
Quentin Colombetcb629a82016-10-12 03:57:49 +00001629 const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
1630 if (RBSrc.getID() != AArch64::GPRRegBankID) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001631 LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc
1632 << ", expected: GPR\n");
Tim Northover3d38b3a2016-10-11 20:50:21 +00001633 return false;
1634 }
1635
1636 const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
1637
1638 if (DstSize == 0) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001639 LLVM_DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
Tim Northover3d38b3a2016-10-11 20:50:21 +00001640 return false;
1641 }
1642
Quentin Colombetcb629a82016-10-12 03:57:49 +00001643 if (DstSize != 64 && DstSize > 32) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001644 LLVM_DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
1645 << ", expected: 32 or 64\n");
Tim Northover3d38b3a2016-10-11 20:50:21 +00001646 return false;
1647 }
Quentin Colombetcb629a82016-10-12 03:57:49 +00001648 // At this point G_ANYEXT is just like a plain COPY, but we need
1649 // to explicitly form the 64-bit value if any.
1650 if (DstSize > 32) {
1651 unsigned ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
1652 BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
1653 .addDef(ExtSrc)
1654 .addImm(0)
1655 .addUse(SrcReg)
1656 .addImm(AArch64::sub_32);
1657 I.getOperand(1).setReg(ExtSrc);
Tim Northover3d38b3a2016-10-11 20:50:21 +00001658 }
Quentin Colombetcb629a82016-10-12 03:57:49 +00001659 return selectCopy(I, TII, MRI, TRI, RBI);
Tim Northover3d38b3a2016-10-11 20:50:21 +00001660 }
1661
1662 case TargetOpcode::G_ZEXT:
1663 case TargetOpcode::G_SEXT: {
1664 unsigned Opcode = I.getOpcode();
1665 const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
1666 SrcTy = MRI.getType(I.getOperand(1).getReg());
1667 const bool isSigned = Opcode == TargetOpcode::G_SEXT;
1668 const unsigned DefReg = I.getOperand(0).getReg();
1669 const unsigned SrcReg = I.getOperand(1).getReg();
1670 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
1671
1672 if (RB.getID() != AArch64::GPRRegBankID) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001673 LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
1674 << ", expected: GPR\n");
Tim Northover3d38b3a2016-10-11 20:50:21 +00001675 return false;
1676 }
1677
1678 MachineInstr *ExtI;
1679 if (DstTy == LLT::scalar(64)) {
1680 // FIXME: Can we avoid manually doing this?
1681 if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001682 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
1683 << " operand\n");
Tim Northover3d38b3a2016-10-11 20:50:21 +00001684 return false;
1685 }
1686
1687 const unsigned SrcXReg =
1688 MRI.createVirtualRegister(&AArch64::GPR64RegClass);
1689 BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
1690 .addDef(SrcXReg)
1691 .addImm(0)
1692 .addUse(SrcReg)
1693 .addImm(AArch64::sub_32);
1694
1695 const unsigned NewOpc = isSigned ? AArch64::SBFMXri : AArch64::UBFMXri;
1696 ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
1697 .addDef(DefReg)
1698 .addUse(SrcXReg)
1699 .addImm(0)
1700 .addImm(SrcTy.getSizeInBits() - 1);
Tim Northovera9105be2016-11-09 22:39:54 +00001701 } else if (DstTy.isScalar() && DstTy.getSizeInBits() <= 32) {
Tim Northover3d38b3a2016-10-11 20:50:21 +00001702 const unsigned NewOpc = isSigned ? AArch64::SBFMWri : AArch64::UBFMWri;
1703 ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
1704 .addDef(DefReg)
1705 .addUse(SrcReg)
1706 .addImm(0)
1707 .addImm(SrcTy.getSizeInBits() - 1);
1708 } else {
1709 return false;
1710 }
1711
1712 constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1713
1714 I.eraseFromParent();
1715 return true;
1716 }
Tim Northoverc1d8c2b2016-10-11 22:29:23 +00001717
Tim Northover69271c62016-10-12 22:49:11 +00001718 case TargetOpcode::G_SITOFP:
1719 case TargetOpcode::G_UITOFP:
1720 case TargetOpcode::G_FPTOSI:
1721 case TargetOpcode::G_FPTOUI: {
1722 const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
1723 SrcTy = MRI.getType(I.getOperand(1).getReg());
1724 const unsigned NewOpc = selectFPConvOpc(Opcode, DstTy, SrcTy);
1725 if (NewOpc == Opcode)
1726 return false;
1727
1728 I.setDesc(TII.get(NewOpc));
1729 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1730
1731 return true;
1732 }
1733
1734
Tim Northoverc1d8c2b2016-10-11 22:29:23 +00001735 case TargetOpcode::G_INTTOPTR:
Daniel Sandersedd07842017-08-17 09:26:14 +00001736 // The importer is currently unable to import pointer types since they
1737 // didn't exist in SelectionDAG.
Daniel Sanderseb2f5f32017-08-15 15:10:31 +00001738 return selectCopy(I, TII, MRI, TRI, RBI);
Daniel Sanders16e6dd32017-08-15 13:50:09 +00001739
Daniel Sandersedd07842017-08-17 09:26:14 +00001740 case TargetOpcode::G_BITCAST:
1741 // Imported SelectionDAG rules can handle every bitcast except those that
1742 // bitcast from a type to the same type. Ideally, these shouldn't occur
Amara Emersonb9560512019-04-11 20:32:24 +00001743 // but we might not run an optimizer that deletes them. The other exception
1744 // is bitcasts involving pointer types, as SelectionDAG has no knowledge
1745 // of them.
1746 return selectCopy(I, TII, MRI, TRI, RBI);
Daniel Sandersedd07842017-08-17 09:26:14 +00001747
Tim Northover9ac0eba2016-11-08 00:45:29 +00001748 case TargetOpcode::G_SELECT: {
1749 if (MRI.getType(I.getOperand(1).getReg()) != LLT::scalar(1)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001750 LLVM_DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
1751 << ", expected: " << LLT::scalar(1) << '\n');
Tim Northover9ac0eba2016-11-08 00:45:29 +00001752 return false;
1753 }
1754
1755 const unsigned CondReg = I.getOperand(1).getReg();
1756 const unsigned TReg = I.getOperand(2).getReg();
1757 const unsigned FReg = I.getOperand(3).getReg();
1758
Jessica Paquette910630c2019-05-03 22:37:46 +00001759 // If we have a floating-point result, then we should use a floating point
1760 // select instead of an integer select.
1761 bool IsFP = (RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI)->getID() !=
1762 AArch64::GPRRegBankID);
Tim Northover9ac0eba2016-11-08 00:45:29 +00001763 unsigned CSelOpc = 0;
1764
1765 if (Ty == LLT::scalar(32)) {
Jessica Paquette910630c2019-05-03 22:37:46 +00001766 CSelOpc = IsFP ? AArch64::FCSELSrrr : AArch64::CSELWr;
Kristof Beylse9412b42017-01-19 13:32:14 +00001767 } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
Jessica Paquette910630c2019-05-03 22:37:46 +00001768 CSelOpc = IsFP ? AArch64::FCSELDrrr : AArch64::CSELXr;
Tim Northover9ac0eba2016-11-08 00:45:29 +00001769 } else {
1770 return false;
1771 }
1772
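    // Test bit 0 of the condition register with ANDS, then select on NE so
    // the true value is chosen whenever that bit is set.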
1773 MachineInstr &TstMI =
1774 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
1775 .addDef(AArch64::WZR)
1776 .addUse(CondReg)
1777 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
1778
1779 MachineInstr &CSelMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CSelOpc))
1780 .addDef(I.getOperand(0).getReg())
1781 .addUse(TReg)
1782 .addUse(FReg)
1783 .addImm(AArch64CC::NE);
1784
1785 constrainSelectedInstRegOperands(TstMI, TII, TRI, RBI);
1786 constrainSelectedInstRegOperands(CSelMI, TII, TRI, RBI);
1787
1788 I.eraseFromParent();
1789 return true;
1790 }
Tim Northover6c02ad52016-10-12 22:49:04 +00001791 case TargetOpcode::G_ICMP: {
Amara Emerson9bf092d2019-04-09 21:22:43 +00001792 if (Ty.isVector())
1793 return selectVectorICmp(I, MRI);
1794
Aditya Nandakumar02c602e2017-07-31 17:00:16 +00001795 if (Ty != LLT::scalar(32)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001796 LLVM_DEBUG(dbgs() << "G_ICMP result has type: " << Ty
1797 << ", expected: " << LLT::scalar(32) << '\n');
Tim Northover6c02ad52016-10-12 22:49:04 +00001798 return false;
1799 }
1800
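    // Compare with a SUBS into the zero register to set the flags, then
    // materialize the i32 result with a CSINC of WZR.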
1801 unsigned CmpOpc = 0;
1802 unsigned ZReg = 0;
1803
1804 LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
1805 if (CmpTy == LLT::scalar(32)) {
1806 CmpOpc = AArch64::SUBSWrr;
1807 ZReg = AArch64::WZR;
1808 } else if (CmpTy == LLT::scalar(64) || CmpTy.isPointer()) {
1809 CmpOpc = AArch64::SUBSXrr;
1810 ZReg = AArch64::XZR;
1811 } else {
1812 return false;
1813 }
1814
Kristof Beyls22524402017-01-05 10:16:08 +00001815 // CSINC increments the result by one when the condition code is false.
1816 // Therefore, we have to invert the predicate to get an increment by 1 when
1817 // the predicate is true.
1818 const AArch64CC::CondCode invCC =
1819 changeICMPPredToAArch64CC(CmpInst::getInversePredicate(
1820 (CmpInst::Predicate)I.getOperand(1).getPredicate()));
Tim Northover6c02ad52016-10-12 22:49:04 +00001821
1822 MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
1823 .addDef(ZReg)
1824 .addUse(I.getOperand(2).getReg())
1825 .addUse(I.getOperand(3).getReg());
1826
1827 MachineInstr &CSetMI =
1828 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
1829 .addDef(I.getOperand(0).getReg())
1830 .addUse(AArch64::WZR)
1831 .addUse(AArch64::WZR)
Kristof Beyls22524402017-01-05 10:16:08 +00001832 .addImm(invCC);
Tim Northover6c02ad52016-10-12 22:49:04 +00001833
1834 constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
1835 constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);
1836
1837 I.eraseFromParent();
1838 return true;
1839 }
1840
Tim Northover7dd378d2016-10-12 22:49:07 +00001841 case TargetOpcode::G_FCMP: {
Aditya Nandakumar02c602e2017-07-31 17:00:16 +00001842 if (Ty != LLT::scalar(32)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001843 LLVM_DEBUG(dbgs() << "G_FCMP result has type: " << Ty
1844 << ", expected: " << LLT::scalar(32) << '\n');
Tim Northover7dd378d2016-10-12 22:49:07 +00001845 return false;
1846 }
1847
1848 unsigned CmpOpc = 0;
1849 LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
1850 if (CmpTy == LLT::scalar(32)) {
1851 CmpOpc = AArch64::FCMPSrr;
1852 } else if (CmpTy == LLT::scalar(64)) {
1853 CmpOpc = AArch64::FCMPDrr;
1854 } else {
1855 return false;
1856 }
1857
1858 // FIXME: regbank
1859
1860 AArch64CC::CondCode CC1, CC2;
1861 changeFCMPPredToAArch64CC(
1862 (CmpInst::Predicate)I.getOperand(1).getPredicate(), CC1, CC2);
1863
1864 MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
1865 .addUse(I.getOperand(2).getReg())
1866 .addUse(I.getOperand(3).getReg());
1867
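    // Some FP predicates (e.g. one, ueq) need two condition codes; when CC2
    // is not AL, materialize each with a CSINC and OR the results together.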
1868 const unsigned DefReg = I.getOperand(0).getReg();
1869 unsigned Def1Reg = DefReg;
1870 if (CC2 != AArch64CC::AL)
1871 Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
1872
1873 MachineInstr &CSetMI =
1874 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
1875 .addDef(Def1Reg)
1876 .addUse(AArch64::WZR)
1877 .addUse(AArch64::WZR)
Tim Northover33a1a0b2017-01-17 23:04:01 +00001878 .addImm(getInvertedCondCode(CC1));
Tim Northover7dd378d2016-10-12 22:49:07 +00001879
1880 if (CC2 != AArch64CC::AL) {
1881 unsigned Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
1882 MachineInstr &CSet2MI =
1883 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
1884 .addDef(Def2Reg)
1885 .addUse(AArch64::WZR)
1886 .addUse(AArch64::WZR)
Tim Northover33a1a0b2017-01-17 23:04:01 +00001887 .addImm(getInvertedCondCode(CC2));
Tim Northover7dd378d2016-10-12 22:49:07 +00001888 MachineInstr &OrMI =
1889 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ORRWrr))
1890 .addDef(DefReg)
1891 .addUse(Def1Reg)
1892 .addUse(Def2Reg);
1893 constrainSelectedInstRegOperands(OrMI, TII, TRI, RBI);
1894 constrainSelectedInstRegOperands(CSet2MI, TII, TRI, RBI);
1895 }
1896
1897 constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
1898 constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);
1899
1900 I.eraseFromParent();
1901 return true;
1902 }
Tim Northovere9600d82017-02-08 17:57:27 +00001903 case TargetOpcode::G_VASTART:
1904 return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI)
1905 : selectVaStartAAPCS(I, MF, MRI);
Jessica Paquette7f6fe7c2019-04-29 20:58:17 +00001906 case TargetOpcode::G_INTRINSIC:
1907 return selectIntrinsic(I, MRI);
Amara Emerson1f5d9942018-04-25 14:43:59 +00001908 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
Jessica Paquette22c62152019-04-02 19:57:26 +00001909 return selectIntrinsicWithSideEffects(I, MRI);
Amara Emerson1e8c1642018-07-31 00:09:02 +00001910 case TargetOpcode::G_IMPLICIT_DEF: {
Justin Bogner4fc69662017-07-12 17:32:32 +00001911 I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
Amara Emerson58aea522018-02-02 01:44:43 +00001912 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
1913 const unsigned DstReg = I.getOperand(0).getReg();
1914 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
1915 const TargetRegisterClass *DstRC =
1916 getRegClassForTypeOnBank(DstTy, DstRB, RBI);
1917 RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
Justin Bogner4fc69662017-07-12 17:32:32 +00001918 return true;
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001919 }
Amara Emerson1e8c1642018-07-31 00:09:02 +00001920 case TargetOpcode::G_BLOCK_ADDR: {
1921 if (TM.getCodeModel() == CodeModel::Large) {
1922 materializeLargeCMVal(I, I.getOperand(1).getBlockAddress(), 0);
1923 I.eraseFromParent();
1924 return true;
1925 } else {
1926 I.setDesc(TII.get(AArch64::MOVaddrBA));
1927 auto MovMI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::MOVaddrBA),
1928 I.getOperand(0).getReg())
1929 .addBlockAddress(I.getOperand(1).getBlockAddress(),
1930 /* Offset */ 0, AArch64II::MO_PAGE)
1931 .addBlockAddress(
1932 I.getOperand(1).getBlockAddress(), /* Offset */ 0,
1933 AArch64II::MO_NC | AArch64II::MO_PAGEOFF);
1934 I.eraseFromParent();
1935 return constrainSelectedInstRegOperands(*MovMI, TII, TRI, RBI);
1936 }
1937 }
Jessica Paquette991cb392019-04-23 20:46:19 +00001938 case TargetOpcode::G_INTRINSIC_TRUNC:
1939 return selectIntrinsicTrunc(I, MRI);
Jessica Paquette4fe75742019-04-23 23:03:03 +00001940 case TargetOpcode::G_INTRINSIC_ROUND:
1941 return selectIntrinsicRound(I, MRI);
Amara Emerson5ec14602018-12-10 18:44:58 +00001942 case TargetOpcode::G_BUILD_VECTOR:
1943 return selectBuildVector(I, MRI);
Amara Emerson8cb186c2018-12-20 01:11:04 +00001944 case TargetOpcode::G_MERGE_VALUES:
1945 return selectMergeValues(I, MRI);
Jessica Paquette245047d2019-01-24 22:00:41 +00001946 case TargetOpcode::G_UNMERGE_VALUES:
1947 return selectUnmergeValues(I, MRI);
Amara Emerson1abe05c2019-02-21 20:20:16 +00001948 case TargetOpcode::G_SHUFFLE_VECTOR:
1949 return selectShuffleVector(I, MRI);
Jessica Paquette607774c2019-03-11 22:18:01 +00001950 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
1951 return selectExtractElt(I, MRI);
Jessica Paquette5aff1f42019-03-14 18:01:30 +00001952 case TargetOpcode::G_INSERT_VECTOR_ELT:
1953 return selectInsertElt(I, MRI);
Amara Emerson2ff22982019-03-14 22:48:15 +00001954 case TargetOpcode::G_CONCAT_VECTORS:
1955 return selectConcatVectors(I, MRI);
Amara Emerson1e8c1642018-07-31 00:09:02 +00001956 }
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001957
1958 return false;
1959}
Daniel Sanders8a4bae92017-03-14 21:32:08 +00001960
Jessica Paquette991cb392019-04-23 20:46:19 +00001961bool AArch64InstructionSelector::selectIntrinsicTrunc(
1962 MachineInstr &I, MachineRegisterInfo &MRI) const {
1963 const LLT SrcTy = MRI.getType(I.getOperand(0).getReg());
1964
1965 // Select the correct opcode.
1966 unsigned Opc = 0;
1967 if (!SrcTy.isVector()) {
1968 switch (SrcTy.getSizeInBits()) {
1969 default:
1970 case 16:
1971 Opc = AArch64::FRINTZHr;
1972 break;
1973 case 32:
1974 Opc = AArch64::FRINTZSr;
1975 break;
1976 case 64:
1977 Opc = AArch64::FRINTZDr;
1978 break;
1979 }
1980 } else {
1981 unsigned NumElts = SrcTy.getNumElements();
1982 switch (SrcTy.getElementType().getSizeInBits()) {
1983 default:
1984 break;
1985 case 16:
1986 if (NumElts == 4)
1987 Opc = AArch64::FRINTZv4f16;
1988 else if (NumElts == 8)
1989 Opc = AArch64::FRINTZv8f16;
1990 break;
1991 case 32:
1992 if (NumElts == 2)
1993 Opc = AArch64::FRINTZv2f32;
1994 else if (NumElts == 4)
1995 Opc = AArch64::FRINTZv4f32;
1996 break;
1997 case 64:
1998 if (NumElts == 2)
1999 Opc = AArch64::FRINTZv2f64;
2000 break;
2001 }
2002 }
2003
2004 if (!Opc) {
2005 // Didn't get an opcode above, bail.
2006 LLVM_DEBUG(dbgs() << "Unsupported type for G_INTRINSIC_TRUNC!\n");
2007 return false;
2008 }
2009
2010 // Legalization would have set us up perfectly for this; we just need to
2011 // set the opcode and move on.
2012 I.setDesc(TII.get(Opc));
2013 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2014}
2015
Jessica Paquette4fe75742019-04-23 23:03:03 +00002016bool AArch64InstructionSelector::selectIntrinsicRound(
2017 MachineInstr &I, MachineRegisterInfo &MRI) const {
2018 const LLT SrcTy = MRI.getType(I.getOperand(0).getReg());
2019
2020 // Select the correct opcode.
2021 unsigned Opc = 0;
2022 if (!SrcTy.isVector()) {
2023 switch (SrcTy.getSizeInBits()) {
2024 default:
2025 case 16:
2026 Opc = AArch64::FRINTAHr;
2027 break;
2028 case 32:
2029 Opc = AArch64::FRINTASr;
2030 break;
2031 case 64:
2032 Opc = AArch64::FRINTADr;
2033 break;
2034 }
2035 } else {
2036 unsigned NumElts = SrcTy.getNumElements();
2037 switch (SrcTy.getElementType().getSizeInBits()) {
2038 default:
2039 break;
2040 case 16:
2041 if (NumElts == 4)
2042 Opc = AArch64::FRINTAv4f16;
2043 else if (NumElts == 8)
2044 Opc = AArch64::FRINTAv8f16;
2045 break;
2046 case 32:
2047 if (NumElts == 2)
2048 Opc = AArch64::FRINTAv2f32;
2049 else if (NumElts == 4)
2050 Opc = AArch64::FRINTAv4f32;
2051 break;
2052 case 64:
2053 if (NumElts == 2)
2054 Opc = AArch64::FRINTAv2f64;
2055 break;
2056 }
2057 }
2058
2059 if (!Opc) {
2060 // Didn't get an opcode above, bail.
2061 LLVM_DEBUG(dbgs() << "Unsupported type for G_INTRINSIC_ROUND!\n");
2062 return false;
2063 }
2064
2065 // Legalization would have set us up perfectly for this; we just need to
2066 // set the opcode and move on.
2067 I.setDesc(TII.get(Opc));
2068 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2069}
2070
Amara Emerson9bf092d2019-04-09 21:22:43 +00002071bool AArch64InstructionSelector::selectVectorICmp(
2072 MachineInstr &I, MachineRegisterInfo &MRI) const {
2073 unsigned DstReg = I.getOperand(0).getReg();
2074 LLT DstTy = MRI.getType(DstReg);
2075 unsigned SrcReg = I.getOperand(2).getReg();
2076 unsigned Src2Reg = I.getOperand(3).getReg();
2077 LLT SrcTy = MRI.getType(SrcReg);
2078
2079 unsigned SrcEltSize = SrcTy.getElementType().getSizeInBits();
2080 unsigned NumElts = DstTy.getNumElements();
2081
2082 // First index is element size, 0 == 8b, 1 == 16b, 2 == 32b, 3 == 64b
2083 // Second index is num elts, 0 == v2, 1 == v4, 2 == v8, 3 == v16
2084 // Third index is cc opcode:
2085 // 0 == eq
2086 // 1 == ugt
2087 // 2 == uge
2088 // 3 == ult
2089 // 4 == ule
2090 // 5 == sgt
2091 // 6 == sge
2092 // 7 == slt
2093 // 8 == sle
2094 // ne is done by negating the 'eq' result.
2095
2096 // The table below assumes that for some comparisons the operands will be
2097 // commuted.
2098 // ult op == commute + ugt op
2099 // ule op == commute + uge op
2100 // slt op == commute + sgt op
2101 // sle op == commute + sge op
2102 unsigned PredIdx = 0;
2103 bool SwapOperands = false;
2104 CmpInst::Predicate Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
2105 switch (Pred) {
2106 case CmpInst::ICMP_NE:
2107 case CmpInst::ICMP_EQ:
2108 PredIdx = 0;
2109 break;
2110 case CmpInst::ICMP_UGT:
2111 PredIdx = 1;
2112 break;
2113 case CmpInst::ICMP_UGE:
2114 PredIdx = 2;
2115 break;
2116 case CmpInst::ICMP_ULT:
2117 PredIdx = 3;
2118 SwapOperands = true;
2119 break;
2120 case CmpInst::ICMP_ULE:
2121 PredIdx = 4;
2122 SwapOperands = true;
2123 break;
2124 case CmpInst::ICMP_SGT:
2125 PredIdx = 5;
2126 break;
2127 case CmpInst::ICMP_SGE:
2128 PredIdx = 6;
2129 break;
2130 case CmpInst::ICMP_SLT:
2131 PredIdx = 7;
2132 SwapOperands = true;
2133 break;
2134 case CmpInst::ICMP_SLE:
2135 PredIdx = 8;
2136 SwapOperands = true;
2137 break;
2138 default:
2139 llvm_unreachable("Unhandled icmp predicate");
2140 return false;
2141 }
2142
2143 // This table obviously should be tablegen'd when we have our GISel native
2144 // tablegen selector.
2145
2146 static const unsigned OpcTable[4][4][9] = {
2147 {
2148 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2149 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2150 0 /* invalid */},
2151 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2152 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2153 0 /* invalid */},
2154 {AArch64::CMEQv8i8, AArch64::CMHIv8i8, AArch64::CMHSv8i8,
2155 AArch64::CMHIv8i8, AArch64::CMHSv8i8, AArch64::CMGTv8i8,
2156 AArch64::CMGEv8i8, AArch64::CMGTv8i8, AArch64::CMGEv8i8},
2157 {AArch64::CMEQv16i8, AArch64::CMHIv16i8, AArch64::CMHSv16i8,
2158 AArch64::CMHIv16i8, AArch64::CMHSv16i8, AArch64::CMGTv16i8,
2159 AArch64::CMGEv16i8, AArch64::CMGTv16i8, AArch64::CMGEv16i8}
2160 },
2161 {
2162 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2163 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2164 0 /* invalid */},
2165 {AArch64::CMEQv4i16, AArch64::CMHIv4i16, AArch64::CMHSv4i16,
2166 AArch64::CMHIv4i16, AArch64::CMHSv4i16, AArch64::CMGTv4i16,
2167 AArch64::CMGEv4i16, AArch64::CMGTv4i16, AArch64::CMGEv4i16},
2168 {AArch64::CMEQv8i16, AArch64::CMHIv8i16, AArch64::CMHSv8i16,
2169 AArch64::CMHIv8i16, AArch64::CMHSv8i16, AArch64::CMGTv8i16,
2170 AArch64::CMGEv8i16, AArch64::CMGTv8i16, AArch64::CMGEv8i16},
2171 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2172 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2173 0 /* invalid */}
2174 },
2175 {
2176 {AArch64::CMEQv2i32, AArch64::CMHIv2i32, AArch64::CMHSv2i32,
2177 AArch64::CMHIv2i32, AArch64::CMHSv2i32, AArch64::CMGTv2i32,
2178 AArch64::CMGEv2i32, AArch64::CMGTv2i32, AArch64::CMGEv2i32},
2179 {AArch64::CMEQv4i32, AArch64::CMHIv4i32, AArch64::CMHSv4i32,
2180 AArch64::CMHIv4i32, AArch64::CMHSv4i32, AArch64::CMGTv4i32,
2181 AArch64::CMGEv4i32, AArch64::CMGTv4i32, AArch64::CMGEv4i32},
2182 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2183 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2184 0 /* invalid */},
2185 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2186 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2187 0 /* invalid */}
2188 },
2189 {
2190 {AArch64::CMEQv2i64, AArch64::CMHIv2i64, AArch64::CMHSv2i64,
2191 AArch64::CMHIv2i64, AArch64::CMHSv2i64, AArch64::CMGTv2i64,
2192 AArch64::CMGEv2i64, AArch64::CMGTv2i64, AArch64::CMGEv2i64},
2193 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2194 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2195 0 /* invalid */},
2196 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2197 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2198 0 /* invalid */},
2199 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2200 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2201 0 /* invalid */}
2202 },
2203 };
2204 unsigned EltIdx = Log2_32(SrcEltSize / 8);
2205 unsigned NumEltsIdx = Log2_32(NumElts / 2);
2206 unsigned Opc = OpcTable[EltIdx][NumEltsIdx][PredIdx];
2207 if (!Opc) {
2208 LLVM_DEBUG(dbgs() << "Could not map G_ICMP to cmp opcode");
2209 return false;
2210 }
2211
2212 const RegisterBank &VecRB = *RBI.getRegBank(SrcReg, MRI, TRI);
2213 const TargetRegisterClass *SrcRC =
2214 getRegClassForTypeOnBank(SrcTy, VecRB, RBI, true);
2215 if (!SrcRC) {
2216 LLVM_DEBUG(dbgs() << "Could not determine source register class.\n");
2217 return false;
2218 }
2219
2220 unsigned NotOpc = Pred == ICmpInst::ICMP_NE ? AArch64::NOTv8i8 : 0;
2221 if (SrcTy.getSizeInBits() == 128)
2222 NotOpc = NotOpc ? AArch64::NOTv16i8 : 0;
2223
2224 if (SwapOperands)
2225 std::swap(SrcReg, Src2Reg);
2226
2227 MachineIRBuilder MIB(I);
2228 auto Cmp = MIB.buildInstr(Opc, {SrcRC}, {SrcReg, Src2Reg});
2229 constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI);
2230
2231 // Invert if we had a 'ne' cc.
2232 if (NotOpc) {
2233 Cmp = MIB.buildInstr(NotOpc, {DstReg}, {Cmp});
2234 constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI);
2235 } else {
2236 MIB.buildCopy(DstReg, Cmp.getReg(0));
2237 }
2238 RBI.constrainGenericRegister(DstReg, *SrcRC, MRI);
2239 I.eraseFromParent();
2240 return true;
2241}
2242
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00002243MachineInstr *AArch64InstructionSelector::emitScalarToVector(
Amara Emerson8acb0d92019-03-04 19:16:00 +00002244 unsigned EltSize, const TargetRegisterClass *DstRC, unsigned Scalar,
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00002245 MachineIRBuilder &MIRBuilder) const {
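  // Put the scalar into the low lane of an otherwise undefined vector by
  // inserting it into the subregister matching the element size.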
2246 auto Undef = MIRBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF, {DstRC}, {});
Amara Emerson5ec14602018-12-10 18:44:58 +00002247
2248 auto BuildFn = [&](unsigned SubregIndex) {
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00002249 auto Ins =
2250 MIRBuilder
2251 .buildInstr(TargetOpcode::INSERT_SUBREG, {DstRC}, {Undef, Scalar})
2252 .addImm(SubregIndex);
2253 constrainSelectedInstRegOperands(*Undef, TII, TRI, RBI);
2254 constrainSelectedInstRegOperands(*Ins, TII, TRI, RBI);
2255 return &*Ins;
Amara Emerson5ec14602018-12-10 18:44:58 +00002256 };
2257
Amara Emerson8acb0d92019-03-04 19:16:00 +00002258 switch (EltSize) {
Jessica Paquette245047d2019-01-24 22:00:41 +00002259 case 16:
2260 return BuildFn(AArch64::hsub);
Amara Emerson5ec14602018-12-10 18:44:58 +00002261 case 32:
2262 return BuildFn(AArch64::ssub);
2263 case 64:
2264 return BuildFn(AArch64::dsub);
2265 default:
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00002266 return nullptr;
Amara Emerson5ec14602018-12-10 18:44:58 +00002267 }
2268}
2269
Amara Emerson8cb186c2018-12-20 01:11:04 +00002270bool AArch64InstructionSelector::selectMergeValues(
2271 MachineInstr &I, MachineRegisterInfo &MRI) const {
2272 assert(I.getOpcode() == TargetOpcode::G_MERGE_VALUES && "unexpected opcode");
2273 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
2274 const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
2275 assert(!DstTy.isVector() && !SrcTy.isVector() && "invalid merge operation");
2276
2277 // At the moment we only support merging two s32s into an s64.
2278 if (I.getNumOperands() != 3)
2279 return false;
2280 if (DstTy.getSizeInBits() != 64 || SrcTy.getSizeInBits() != 32)
2281 return false;
2282 const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI);
2283 if (RB.getID() != AArch64::GPRRegBankID)
2284 return false;
2285
2286 auto *DstRC = &AArch64::GPR64RegClass;
2287 unsigned SubToRegDef = MRI.createVirtualRegister(DstRC);
2288 MachineInstr &SubRegMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
2289 TII.get(TargetOpcode::SUBREG_TO_REG))
2290 .addDef(SubToRegDef)
2291 .addImm(0)
2292 .addUse(I.getOperand(1).getReg())
2293 .addImm(AArch64::sub_32);
2294 unsigned SubToRegDef2 = MRI.createVirtualRegister(DstRC);
2295 // Need to anyext the second scalar before we can use bfm
2296 MachineInstr &SubRegMI2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
2297 TII.get(TargetOpcode::SUBREG_TO_REG))
2298 .addDef(SubToRegDef2)
2299 .addImm(0)
2300 .addUse(I.getOperand(2).getReg())
2301 .addImm(AArch64::sub_32);
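  // BFM with immr = 32, imms = 31 acts as BFI #32, #32: it inserts the low
  // 32 bits of the second source into the top half of the result.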
Amara Emerson8cb186c2018-12-20 01:11:04 +00002302 MachineInstr &BFM =
2303 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::BFMXri))
Amara Emerson321bfb22018-12-20 03:27:42 +00002304 .addDef(I.getOperand(0).getReg())
Amara Emerson8cb186c2018-12-20 01:11:04 +00002305 .addUse(SubToRegDef)
2306 .addUse(SubToRegDef2)
2307 .addImm(32)
2308 .addImm(31);
2309 constrainSelectedInstRegOperands(SubRegMI, TII, TRI, RBI);
2310 constrainSelectedInstRegOperands(SubRegMI2, TII, TRI, RBI);
2311 constrainSelectedInstRegOperands(BFM, TII, TRI, RBI);
2312 I.eraseFromParent();
2313 return true;
2314}
2315
Jessica Paquette607774c2019-03-11 22:18:01 +00002316static bool getLaneCopyOpcode(unsigned &CopyOpc, unsigned &ExtractSubReg,
2317 const unsigned EltSize) {
2318 // Choose a lane copy opcode and subregister based off of the size of the
2319 // vector's elements.
2320 switch (EltSize) {
2321 case 16:
2322 CopyOpc = AArch64::CPYi16;
2323 ExtractSubReg = AArch64::hsub;
2324 break;
2325 case 32:
2326 CopyOpc = AArch64::CPYi32;
2327 ExtractSubReg = AArch64::ssub;
2328 break;
2329 case 64:
2330 CopyOpc = AArch64::CPYi64;
2331 ExtractSubReg = AArch64::dsub;
2332 break;
2333 default:
2334 // Unknown size, bail out.
2335 LLVM_DEBUG(dbgs() << "Elt size '" << EltSize << "' unsupported.\n");
2336 return false;
2337 }
2338 return true;
2339}
2340
Amara Emersond61b89b2019-03-14 22:48:18 +00002341MachineInstr *AArch64InstructionSelector::emitExtractVectorElt(
2342 Optional<unsigned> DstReg, const RegisterBank &DstRB, LLT ScalarTy,
2343 unsigned VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const {
2344 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
2345 unsigned CopyOpc = 0;
2346 unsigned ExtractSubReg = 0;
2347 if (!getLaneCopyOpcode(CopyOpc, ExtractSubReg, ScalarTy.getSizeInBits())) {
2348 LLVM_DEBUG(
2349 dbgs() << "Couldn't determine lane copy opcode for instruction.\n");
2350 return nullptr;
2351 }
2352
2353 const TargetRegisterClass *DstRC =
2354 getRegClassForTypeOnBank(ScalarTy, DstRB, RBI, true);
2355 if (!DstRC) {
2356 LLVM_DEBUG(dbgs() << "Could not determine destination register class.\n");
2357 return nullptr;
2358 }
2359
2360 const RegisterBank &VecRB = *RBI.getRegBank(VecReg, MRI, TRI);
2361 const LLT &VecTy = MRI.getType(VecReg);
2362 const TargetRegisterClass *VecRC =
2363 getRegClassForTypeOnBank(VecTy, VecRB, RBI, true);
2364 if (!VecRC) {
2365 LLVM_DEBUG(dbgs() << "Could not determine source register class.\n");
2366 return nullptr;
2367 }
2368
2369 // The register that we're going to copy into.
2370 unsigned InsertReg = VecReg;
2371 if (!DstReg)
2372 DstReg = MRI.createVirtualRegister(DstRC);
2373 // If the lane index is 0, we just use a subregister COPY.
2374 if (LaneIdx == 0) {
Amara Emerson86271782019-03-18 19:20:10 +00002375 auto Copy = MIRBuilder.buildInstr(TargetOpcode::COPY, {*DstReg}, {})
2376 .addReg(VecReg, 0, ExtractSubReg);
Amara Emersond61b89b2019-03-14 22:48:18 +00002377 RBI.constrainGenericRegister(*DstReg, *DstRC, MRI);
Amara Emerson3739a202019-03-15 21:59:50 +00002378 return &*Copy;
Amara Emersond61b89b2019-03-14 22:48:18 +00002379 }
2380
2381 // Lane copies require 128-bit wide registers. If we're dealing with an
2382 // unpacked vector, then we need to move up to that width. Insert an implicit
2383 // def and a subregister insert to get us there.
2384 if (VecTy.getSizeInBits() != 128) {
2385 MachineInstr *ScalarToVector = emitScalarToVector(
2386 VecTy.getSizeInBits(), &AArch64::FPR128RegClass, VecReg, MIRBuilder);
2387 if (!ScalarToVector)
2388 return nullptr;
2389 InsertReg = ScalarToVector->getOperand(0).getReg();
2390 }
2391
2392 MachineInstr *LaneCopyMI =
2393 MIRBuilder.buildInstr(CopyOpc, {*DstReg}, {InsertReg}).addImm(LaneIdx);
2394 constrainSelectedInstRegOperands(*LaneCopyMI, TII, TRI, RBI);
2395
2396 // Make sure that we actually constrain the initial copy.
2397 RBI.constrainGenericRegister(*DstReg, *DstRC, MRI);
2398 return LaneCopyMI;
2399}
2400
Jessica Paquette607774c2019-03-11 22:18:01 +00002401bool AArch64InstructionSelector::selectExtractElt(
2402 MachineInstr &I, MachineRegisterInfo &MRI) const {
2403 assert(I.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT &&
2404 "unexpected opcode!");
2405 unsigned DstReg = I.getOperand(0).getReg();
2406 const LLT NarrowTy = MRI.getType(DstReg);
2407 const unsigned SrcReg = I.getOperand(1).getReg();
2408 const LLT WideTy = MRI.getType(SrcReg);
Amara Emersond61b89b2019-03-14 22:48:18 +00002409 (void)WideTy;
Jessica Paquette607774c2019-03-11 22:18:01 +00002410 assert(WideTy.getSizeInBits() >= NarrowTy.getSizeInBits() &&
2411 "source register size too small!");
2412 assert(NarrowTy.isScalar() && "cannot extract vector into vector!");
2413
2414 // Need the lane index to determine the correct copy opcode.
2415 MachineOperand &LaneIdxOp = I.getOperand(2);
2416 assert(LaneIdxOp.isReg() && "Lane index operand was not a register?");
2417
2418 if (RBI.getRegBank(DstReg, MRI, TRI)->getID() != AArch64::FPRRegBankID) {
2419 LLVM_DEBUG(dbgs() << "Cannot extract into GPR.\n");
2420 return false;
2421 }
2422
Jessica Paquettebb1aced2019-03-13 21:19:29 +00002423 // Find the index to extract from.
Jessica Paquette76f64b62019-04-26 21:53:13 +00002424 auto VRegAndVal = getConstantVRegValWithLookThrough(LaneIdxOp.getReg(), MRI);
2425 if (!VRegAndVal)
Jessica Paquette607774c2019-03-11 22:18:01 +00002426 return false;
Jessica Paquette76f64b62019-04-26 21:53:13 +00002427 unsigned LaneIdx = VRegAndVal->Value;
Jessica Paquette607774c2019-03-11 22:18:01 +00002428
Jessica Paquette607774c2019-03-11 22:18:01 +00002429 MachineIRBuilder MIRBuilder(I);
2430
Amara Emersond61b89b2019-03-14 22:48:18 +00002431 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
2432 MachineInstr *Extract = emitExtractVectorElt(DstReg, DstRB, NarrowTy, SrcReg,
2433 LaneIdx, MIRBuilder);
2434 if (!Extract)
2435 return false;
2436
2437 I.eraseFromParent();
2438 return true;
2439}
2440
2441bool AArch64InstructionSelector::selectSplitVectorUnmerge(
2442 MachineInstr &I, MachineRegisterInfo &MRI) const {
2443 unsigned NumElts = I.getNumOperands() - 1;
2444 unsigned SrcReg = I.getOperand(NumElts).getReg();
2445 const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
2446 const LLT SrcTy = MRI.getType(SrcReg);
2447
2448 assert(NarrowTy.isVector() && "Expected an unmerge into vectors");
2449 if (SrcTy.getSizeInBits() > 128) {
2450 LLVM_DEBUG(dbgs() << "Unexpected vector type for vec split unmerge");
2451 return false;
Jessica Paquette607774c2019-03-11 22:18:01 +00002452 }
2453
Amara Emersond61b89b2019-03-14 22:48:18 +00002454 MachineIRBuilder MIB(I);
2455
2456 // We implement a split vector operation by treating the sub-vectors as
2457 // scalars and extracting them.
2458 const RegisterBank &DstRB =
2459 *RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI);
2460 for (unsigned OpIdx = 0; OpIdx < NumElts; ++OpIdx) {
2461 unsigned Dst = I.getOperand(OpIdx).getReg();
2462 MachineInstr *Extract =
2463 emitExtractVectorElt(Dst, DstRB, NarrowTy, SrcReg, OpIdx, MIB);
2464 if (!Extract)
Jessica Paquette607774c2019-03-11 22:18:01 +00002465 return false;
Jessica Paquette607774c2019-03-11 22:18:01 +00002466 }
Jessica Paquette607774c2019-03-11 22:18:01 +00002467 I.eraseFromParent();
2468 return true;
2469}
2470
Jessica Paquette245047d2019-01-24 22:00:41 +00002471bool AArch64InstructionSelector::selectUnmergeValues(
2472 MachineInstr &I, MachineRegisterInfo &MRI) const {
2473 assert(I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2474 "unexpected opcode");
2475
2476 // TODO: Handle unmerging into GPRs and from scalars to scalars.
2477 if (RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI)->getID() !=
2478 AArch64::FPRRegBankID ||
2479 RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI)->getID() !=
2480 AArch64::FPRRegBankID) {
2481 LLVM_DEBUG(dbgs() << "Unmerging vector-to-gpr and scalar-to-scalar "
2482 "currently unsupported.\n");
2483 return false;
2484 }
2485
2486 // The last operand is the vector source register, and every other operand is
2487 // a register to unpack into.
2488 unsigned NumElts = I.getNumOperands() - 1;
2489 unsigned SrcReg = I.getOperand(NumElts).getReg();
2490 const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
2491 const LLT WideTy = MRI.getType(SrcReg);
Benjamin Kramer653020d2019-01-24 23:45:07 +00002492 (void)WideTy;
Jessica Paquette245047d2019-01-24 22:00:41 +00002493 assert(WideTy.isVector() && "can only unmerge from vector types!");
2494 assert(WideTy.getSizeInBits() > NarrowTy.getSizeInBits() &&
2495 "source register size too small!");
2496
Amara Emersond61b89b2019-03-14 22:48:18 +00002497 if (!NarrowTy.isScalar())
2498 return selectSplitVectorUnmerge(I, MRI);
Jessica Paquette245047d2019-01-24 22:00:41 +00002499
Amara Emerson3739a202019-03-15 21:59:50 +00002500 MachineIRBuilder MIB(I);
2501
Jessica Paquette245047d2019-01-24 22:00:41 +00002502 // Choose a lane copy opcode and subregister based off of the size of the
2503 // vector's elements.
2504 unsigned CopyOpc = 0;
2505 unsigned ExtractSubReg = 0;
Jessica Paquette607774c2019-03-11 22:18:01 +00002506 if (!getLaneCopyOpcode(CopyOpc, ExtractSubReg, NarrowTy.getSizeInBits()))
Jessica Paquette245047d2019-01-24 22:00:41 +00002507 return false;
Jessica Paquette245047d2019-01-24 22:00:41 +00002508
2509 // Set up for the lane copies.
2510 MachineBasicBlock &MBB = *I.getParent();
2511
2512 // Stores the registers we'll be copying from.
2513 SmallVector<unsigned, 4> InsertRegs;
2514
2515 // We'll use the first register twice, so we only need NumElts-1 registers.
2516 unsigned NumInsertRegs = NumElts - 1;
2517
2518 // If our elements fit into exactly 128 bits, then we can copy from the source
2519 // directly. Otherwise, we need to do a bit of setup with some subregister
2520 // inserts.
2521 if (NarrowTy.getSizeInBits() * NumElts == 128) {
2522 InsertRegs = SmallVector<unsigned, 4>(NumInsertRegs, SrcReg);
2523 } else {
2524 // No. We have to perform subregister inserts. For each insert, create an
2525 // implicit def and a subregister insert, and save the register we create.
2526 for (unsigned Idx = 0; Idx < NumInsertRegs; ++Idx) {
2527 unsigned ImpDefReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
2528 MachineInstr &ImpDefMI =
2529 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(TargetOpcode::IMPLICIT_DEF),
2530 ImpDefReg);
2531
2532 // Now, create the subregister insert from SrcReg.
2533 unsigned InsertReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
2534 MachineInstr &InsMI =
2535 *BuildMI(MBB, I, I.getDebugLoc(),
2536 TII.get(TargetOpcode::INSERT_SUBREG), InsertReg)
2537 .addUse(ImpDefReg)
2538 .addUse(SrcReg)
2539 .addImm(AArch64::dsub);
2540
2541 constrainSelectedInstRegOperands(ImpDefMI, TII, TRI, RBI);
2542 constrainSelectedInstRegOperands(InsMI, TII, TRI, RBI);
2543
2544 // Save the register so that we can copy from it after.
2545 InsertRegs.push_back(InsertReg);
2546 }
2547 }
2548
2549 // Now that we've created any necessary subregister inserts, we can
2550 // create the copies.
2551 //
2552 // Perform the first copy separately as a subregister copy.
2553 unsigned CopyTo = I.getOperand(0).getReg();
Amara Emerson86271782019-03-18 19:20:10 +00002554 auto FirstCopy = MIB.buildInstr(TargetOpcode::COPY, {CopyTo}, {})
2555 .addReg(InsertRegs[0], 0, ExtractSubReg);
Amara Emerson3739a202019-03-15 21:59:50 +00002556 constrainSelectedInstRegOperands(*FirstCopy, TII, TRI, RBI);
Jessica Paquette245047d2019-01-24 22:00:41 +00002557
2558 // Now, perform the remaining copies as vector lane copies.
2559 unsigned LaneIdx = 1;
2560 for (unsigned InsReg : InsertRegs) {
2561 unsigned CopyTo = I.getOperand(LaneIdx).getReg();
2562 MachineInstr &CopyInst =
2563 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CopyOpc), CopyTo)
2564 .addUse(InsReg)
2565 .addImm(LaneIdx);
2566 constrainSelectedInstRegOperands(CopyInst, TII, TRI, RBI);
2567 ++LaneIdx;
2568 }
2569
2570 // Separately constrain the first copy's destination. Because of the
2571 // limitation in constrainOperandRegClass, we can't guarantee that this will
2572 // actually be constrained. So, do it ourselves using the second operand.
2573 const TargetRegisterClass *RC =
2574 MRI.getRegClassOrNull(I.getOperand(1).getReg());
2575 if (!RC) {
2576 LLVM_DEBUG(dbgs() << "Couldn't constrain copy destination.\n");
2577 return false;
2578 }
2579
2580 RBI.constrainGenericRegister(CopyTo, *RC, MRI);
2581 I.eraseFromParent();
2582 return true;
2583}
2584
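/// Select a G_CONCAT_VECTORS instruction by delegating to emitVectorConcat.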
Amara Emerson2ff22982019-03-14 22:48:15 +00002585bool AArch64InstructionSelector::selectConcatVectors(
2586 MachineInstr &I, MachineRegisterInfo &MRI) const {
2587 assert(I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
2588 "Unexpected opcode");
2589 unsigned Dst = I.getOperand(0).getReg();
2590 unsigned Op1 = I.getOperand(1).getReg();
2591 unsigned Op2 = I.getOperand(2).getReg();
2592 MachineIRBuilder MIRBuilder(I);
2593 MachineInstr *ConcatMI = emitVectorConcat(Dst, Op1, Op2, MIRBuilder);
2594 if (!ConcatMI)
2595 return false;
2596 I.eraseFromParent();
2597 return true;
2598}
2599
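/// Collect the lane indices from the constant mask operand (a G_BUILD_VECTOR)
/// of a G_SHUFFLE_VECTOR. Undef lanes are recorded as None.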
Amara Emerson1abe05c2019-02-21 20:20:16 +00002600void AArch64InstructionSelector::collectShuffleMaskIndices(
2601 MachineInstr &I, MachineRegisterInfo &MRI,
Amara Emerson2806fd02019-04-12 21:31:21 +00002602 SmallVectorImpl<Optional<int>> &Idxs) const {
Amara Emerson1abe05c2019-02-21 20:20:16 +00002603 MachineInstr *MaskDef = MRI.getVRegDef(I.getOperand(3).getReg());
2604 assert(
2605 MaskDef->getOpcode() == TargetOpcode::G_BUILD_VECTOR &&
2606 "G_SHUFFLE_VECTOR should have a constant mask operand as G_BUILD_VECTOR");
2607 // Find the constant indices.
2608 for (unsigned i = 1, e = MaskDef->getNumOperands(); i < e; ++i) {
2609 MachineInstr *ScalarDef = MRI.getVRegDef(MaskDef->getOperand(i).getReg());
2610 assert(ScalarDef && "Could not find vreg def of shufflevec index op");
2611 // Look through copies.
2612 while (ScalarDef->getOpcode() == TargetOpcode::COPY) {
2613 ScalarDef = MRI.getVRegDef(ScalarDef->getOperand(1).getReg());
2614 assert(ScalarDef && "Could not find def of copy operand");
2615 }
Amara Emerson2806fd02019-04-12 21:31:21 +00002616 if (ScalarDef->getOpcode() != TargetOpcode::G_CONSTANT) {
2617 // This must be an undef if it's not a constant.
2618 assert(ScalarDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
2619 Idxs.push_back(None);
2620 } else {
2621 Idxs.push_back(ScalarDef->getOperand(1).getCImm()->getSExtValue());
2622 }
Amara Emerson1abe05c2019-02-21 20:20:16 +00002623 }
2624}
2625
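/// Add \p CPVal to the function's constant pool and return its index.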
2626unsigned
2627AArch64InstructionSelector::emitConstantPoolEntry(Constant *CPVal,
2628 MachineFunction &MF) const {
Hans Wennborg5d5ee4a2019-04-26 08:31:00 +00002629 Type *CPTy = CPVal->getType();
Amara Emerson1abe05c2019-02-21 20:20:16 +00002630 unsigned Align = MF.getDataLayout().getPrefTypeAlignment(CPTy);
2631 if (Align == 0)
2632 Align = MF.getDataLayout().getTypeAllocSize(CPTy);
2633
2634 MachineConstantPool *MCP = MF.getConstantPool();
2635 return MCP->getConstantPoolIndex(CPVal, Align);
2636}
2637
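/// Emit an ADRP plus a load that materializes \p CPVal from the constant
/// pool. Returns the load instruction, or nullptr if the type's store size
/// is not supported.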
2638MachineInstr *AArch64InstructionSelector::emitLoadFromConstantPool(
2639 Constant *CPVal, MachineIRBuilder &MIRBuilder) const {
2640 unsigned CPIdx = emitConstantPoolEntry(CPVal, MIRBuilder.getMF());
2641
2642 auto Adrp =
2643 MIRBuilder.buildInstr(AArch64::ADRP, {&AArch64::GPR64RegClass}, {})
2644 .addConstantPoolIndex(CPIdx, 0, AArch64II::MO_PAGE);
Amara Emerson8acb0d92019-03-04 19:16:00 +00002645
2646 MachineInstr *LoadMI = nullptr;
2647 switch (MIRBuilder.getDataLayout().getTypeStoreSize(CPVal->getType())) {
2648 case 16:
2649 LoadMI =
2650 &*MIRBuilder
2651 .buildInstr(AArch64::LDRQui, {&AArch64::FPR128RegClass}, {Adrp})
2652 .addConstantPoolIndex(CPIdx, 0,
2653 AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2654 break;
2655 case 8:
2656 LoadMI = &*MIRBuilder
2657 .buildInstr(AArch64::LDRDui, {&AArch64::FPR64RegClass}, {Adrp})
2658 .addConstantPoolIndex(
2659 CPIdx, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2660 break;
2661 default:
2662 LLVM_DEBUG(dbgs() << "Could not load from constant pool of type "
2663 << *CPVal->getType());
2664 return nullptr;
2665 }
Amara Emerson1abe05c2019-02-21 20:20:16 +00002666 constrainSelectedInstRegOperands(*Adrp, TII, TRI, RBI);
Amara Emerson8acb0d92019-03-04 19:16:00 +00002667 constrainSelectedInstRegOperands(*LoadMI, TII, TRI, RBI);
2668 return LoadMI;
2669}
2670
2671/// Return an <Opcode, SubregIndex> pair to do a vector elt insert of a given
2672/// size and RB.
2673static std::pair<unsigned, unsigned>
2674getInsertVecEltOpInfo(const RegisterBank &RB, unsigned EltSize) {
2675 unsigned Opc, SubregIdx;
2676 if (RB.getID() == AArch64::GPRRegBankID) {
2677 if (EltSize == 32) {
2678 Opc = AArch64::INSvi32gpr;
2679 SubregIdx = AArch64::ssub;
2680 } else if (EltSize == 64) {
2681 Opc = AArch64::INSvi64gpr;
2682 SubregIdx = AArch64::dsub;
2683 } else {
2684 llvm_unreachable("invalid elt size!");
2685 }
2686 } else {
2687 if (EltSize == 8) {
2688 Opc = AArch64::INSvi8lane;
2689 SubregIdx = AArch64::bsub;
2690 } else if (EltSize == 16) {
2691 Opc = AArch64::INSvi16lane;
2692 SubregIdx = AArch64::hsub;
2693 } else if (EltSize == 32) {
2694 Opc = AArch64::INSvi32lane;
2695 SubregIdx = AArch64::ssub;
2696 } else if (EltSize == 64) {
2697 Opc = AArch64::INSvi64lane;
2698 SubregIdx = AArch64::dsub;
2699 } else {
2700 llvm_unreachable("invalid elt size!");
2701 }
2702 }
2703 return std::make_pair(Opc, SubregIdx);
2704}
2705
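/// Concatenate two 64-bit vectors into a 128-bit result. If \p Dst is not
/// provided, a new virtual register is created for it. Returns nullptr on
/// failure.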
2706MachineInstr *AArch64InstructionSelector::emitVectorConcat(
Amara Emerson2ff22982019-03-14 22:48:15 +00002707 Optional<unsigned> Dst, unsigned Op1, unsigned Op2,
2708 MachineIRBuilder &MIRBuilder) const {
Amara Emerson8acb0d92019-03-04 19:16:00 +00002709 // We implement a vector concat by:
2710 // 1. Use scalar_to_vector to insert the lower vector into the larger dest
2711 // 2. Insert the upper vector into the destination's upper element
2712 // TODO: some of this code is common with G_BUILD_VECTOR handling.
2713 MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
2714
2715 const LLT Op1Ty = MRI.getType(Op1);
2716 const LLT Op2Ty = MRI.getType(Op2);
2717
2718 if (Op1Ty != Op2Ty) {
2719 LLVM_DEBUG(dbgs() << "Could not do vector concat of differing vector tys");
2720 return nullptr;
2721 }
2722 assert(Op1Ty.isVector() && "Expected a vector for vector concat");
2723
2724 if (Op1Ty.getSizeInBits() >= 128) {
2725 LLVM_DEBUG(dbgs() << "Vector concat not supported for full size vectors");
2726 return nullptr;
2727 }
2728
2729 // At the moment we just support 64 bit vector concats.
2730 if (Op1Ty.getSizeInBits() != 64) {
2731 LLVM_DEBUG(dbgs() << "Vector concat only supported for 64b vectors");
2732 return nullptr;
2733 }
2734
2735 const LLT ScalarTy = LLT::scalar(Op1Ty.getSizeInBits());
2736 const RegisterBank &FPRBank = *RBI.getRegBank(Op1, MRI, TRI);
2737 const TargetRegisterClass *DstRC =
2738 getMinClassForRegBank(FPRBank, Op1Ty.getSizeInBits() * 2);
2739
2740 MachineInstr *WidenedOp1 =
2741 emitScalarToVector(ScalarTy.getSizeInBits(), DstRC, Op1, MIRBuilder);
2742 MachineInstr *WidenedOp2 =
2743 emitScalarToVector(ScalarTy.getSizeInBits(), DstRC, Op2, MIRBuilder);
2744 if (!WidenedOp1 || !WidenedOp2) {
2745 LLVM_DEBUG(dbgs() << "Could not emit a vector from scalar value");
2746 return nullptr;
2747 }
2748
2749 // Now do the insert of the upper element.
2750 unsigned InsertOpc, InsSubRegIdx;
2751 std::tie(InsertOpc, InsSubRegIdx) =
2752 getInsertVecEltOpInfo(FPRBank, ScalarTy.getSizeInBits());
2753
Amara Emerson2ff22982019-03-14 22:48:15 +00002754 if (!Dst)
2755 Dst = MRI.createVirtualRegister(DstRC);
Amara Emerson8acb0d92019-03-04 19:16:00 +00002756 auto InsElt =
2757 MIRBuilder
Amara Emerson2ff22982019-03-14 22:48:15 +00002758 .buildInstr(InsertOpc, {*Dst}, {WidenedOp1->getOperand(0).getReg()})
Amara Emerson8acb0d92019-03-04 19:16:00 +00002759 .addImm(1) /* Lane index */
2760 .addUse(WidenedOp2->getOperand(0).getReg())
2761 .addImm(0);
Amara Emerson8acb0d92019-03-04 19:16:00 +00002762 constrainSelectedInstRegOperands(*InsElt, TII, TRI, RBI);
2763 return &*InsElt;
Amara Emerson1abe05c2019-02-21 20:20:16 +00002764}
2765
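/// Try to rewrite a G_FCONSTANT in place as an FMOV immediate instruction.
/// Returns the rewritten instruction, or nullptr if the value cannot be
/// encoded as an FMOV immediate.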
Jessica Paquettea3843fe2019-05-01 22:39:43 +00002766MachineInstr *AArch64InstructionSelector::emitFMovForFConstant(
2767 MachineInstr &I, MachineRegisterInfo &MRI) const {
2768 assert(I.getOpcode() == TargetOpcode::G_FCONSTANT &&
2769 "Expected a G_FCONSTANT!");
2770 MachineOperand &ImmOp = I.getOperand(1);
2771 unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
2772
2773 // Only handle 32 and 64 bit defs for now.
2774 if (DefSize != 32 && DefSize != 64)
2775 return nullptr;
2776
2777 // Don't handle null values using FMOV.
2778 if (ImmOp.getFPImm()->isNullValue())
2779 return nullptr;
2780
2781 // Get the immediate representation for the FMOV.
2782 const APFloat &ImmValAPF = ImmOp.getFPImm()->getValueAPF();
2783 int Imm = DefSize == 32 ? AArch64_AM::getFP32Imm(ImmValAPF)
2784 : AArch64_AM::getFP64Imm(ImmValAPF);
2785
2786 // If this is -1, it means the immediate can't be represented as the requested
2787 // floating point value. Bail.
2788 if (Imm == -1)
2789 return nullptr;
2790
2791 // Update MI to represent the new FMOV instruction, constrain it, and return.
2792 ImmOp.ChangeToImmediate(Imm);
2793 unsigned MovOpc = DefSize == 32 ? AArch64::FMOVSi : AArch64::FMOVDi;
2794 I.setDesc(TII.get(MovOpc));
2795 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2796 return &I;
2797}
2798
Amara Emerson761ca2e2019-03-19 21:43:05 +00002799bool AArch64InstructionSelector::tryOptVectorDup(MachineInstr &I) const {
2800 // Try to match a vector splat operation into a dup instruction.
2801 // We're looking for this pattern:
2802 // %scalar:gpr(s64) = COPY $x0
2803 // %undef:fpr(<2 x s64>) = G_IMPLICIT_DEF
2804 // %cst0:gpr(s32) = G_CONSTANT i32 0
2805 // %zerovec:fpr(<2 x s32>) = G_BUILD_VECTOR %cst0(s32), %cst0(s32)
2806 // %ins:fpr(<2 x s64>) = G_INSERT_VECTOR_ELT %undef, %scalar(s64), %cst0(s32)
2807 // %splat:fpr(<2 x s64>) = G_SHUFFLE_VECTOR %ins(<2 x s64>), %undef,
2808 // %zerovec(<2 x s32>)
2809 //
2810 // ...into:
2811 // %splat = DUP %scalar
2812 // We use the regbank of the scalar to determine which kind of dup to use.
2813 MachineIRBuilder MIB(I);
2814 MachineRegisterInfo &MRI = *MIB.getMRI();
2815 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
2816 using namespace TargetOpcode;
2817 using namespace MIPatternMatch;
2818
2819 // Begin matching the insert.
2820 auto *InsMI =
2821 findMIFromReg(I.getOperand(1).getReg(), G_INSERT_VECTOR_ELT, MIB);
2822 if (!InsMI)
2823 return false;
2824 // Match the undef vector operand.
2825 auto *UndefMI =
2826 findMIFromReg(InsMI->getOperand(1).getReg(), G_IMPLICIT_DEF, MIB);
2827 if (!UndefMI)
2828 return false;
2829 // Match the scalar being splatted.
2830 unsigned ScalarReg = InsMI->getOperand(2).getReg();
2831 const RegisterBank *ScalarRB = RBI.getRegBank(ScalarReg, MRI, TRI);
2832 // Match the index constant 0.
2833 int64_t Index = 0;
2834 if (!mi_match(InsMI->getOperand(3).getReg(), MRI, m_ICst(Index)) || Index)
2835 return false;
2836
2837 // The shuffle's second operand doesn't matter if the mask is all zero.
2838 auto *ZeroVec = findMIFromReg(I.getOperand(3).getReg(), G_BUILD_VECTOR, MIB);
2839 if (!ZeroVec)
2840 return false;
2841 int64_t Zero = 0;
2842 if (!mi_match(ZeroVec->getOperand(1).getReg(), MRI, m_ICst(Zero)) || Zero)
2843 return false;
2844 for (unsigned i = 1, e = ZeroVec->getNumOperands(); i < e; ++i) {
2845 if (ZeroVec->getOperand(i).getReg() != ZeroVec->getOperand(1).getReg())
2846 return false; // This wasn't an all zeros vector.
2847 }
2848
2849 // We're done, now find out what kind of splat we need.
2850 LLT VecTy = MRI.getType(I.getOperand(0).getReg());
2851 LLT EltTy = VecTy.getElementType();
2852 if (VecTy.getSizeInBits() != 128 || EltTy.getSizeInBits() < 32) {
2853 LLVM_DEBUG(dbgs() << "Could not optimize splat pattern: unsupported vector or element size");
2854 return false;
2855 }
2856 bool IsFP = ScalarRB->getID() == AArch64::FPRRegBankID;
2857 static const unsigned OpcTable[2][2] = {
2858 {AArch64::DUPv4i32gpr, AArch64::DUPv2i64gpr},
2859 {AArch64::DUPv4i32lane, AArch64::DUPv2i64lane}};
2860 unsigned Opc = OpcTable[IsFP][EltTy.getSizeInBits() == 64];
2861
2862 // For FP splats, we need to widen the scalar reg via undef too.
2863 if (IsFP) {
2864 MachineInstr *Widen = emitScalarToVector(
2865 EltTy.getSizeInBits(), &AArch64::FPR128RegClass, ScalarReg, MIB);
2866 if (!Widen)
2867 return false;
2868 ScalarReg = Widen->getOperand(0).getReg();
2869 }
2870 auto Dup = MIB.buildInstr(Opc, {I.getOperand(0).getReg()}, {ScalarReg});
2871 if (IsFP)
2872 Dup.addImm(0);
2873 constrainSelectedInstRegOperands(*Dup, TII, TRI, RBI);
2874 I.eraseFromParent();
2875 return true;
2876}
2877
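/// Try the optimized G_SHUFFLE_VECTOR selections before falling back to the
/// generic TBL-based lowering.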
2878bool AArch64InstructionSelector::tryOptVectorShuffle(MachineInstr &I) const {
2879 if (TM.getOptLevel() == CodeGenOpt::None)
2880 return false;
2881 if (tryOptVectorDup(I))
2882 return true;
2883 return false;
2884}
2885
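/// Select a G_SHUFFLE_VECTOR by loading the shuffle mask from the constant
/// pool and using TBL1/TBL2.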
Amara Emerson1abe05c2019-02-21 20:20:16 +00002886bool AArch64InstructionSelector::selectShuffleVector(
2887 MachineInstr &I, MachineRegisterInfo &MRI) const {
Amara Emerson761ca2e2019-03-19 21:43:05 +00002888 if (tryOptVectorShuffle(I))
2889 return true;
Amara Emerson1abe05c2019-02-21 20:20:16 +00002890 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
2891 unsigned Src1Reg = I.getOperand(1).getReg();
2892 const LLT Src1Ty = MRI.getType(Src1Reg);
2893 unsigned Src2Reg = I.getOperand(2).getReg();
2894 const LLT Src2Ty = MRI.getType(Src2Reg);
2895
2896 MachineBasicBlock &MBB = *I.getParent();
2897 MachineFunction &MF = *MBB.getParent();
2898 LLVMContext &Ctx = MF.getFunction().getContext();
2899
2900 // G_SHUFFLE_VECTOR doesn't really have a strictly enforced constant mask
2901 // operand; it comes in as a normal vector value which we have to analyze to
Amara Emerson2806fd02019-04-12 21:31:21 +00002902 // find the mask indices. If the mask element is undef, then
2903 // collectShuffleMaskIndices() will add a None entry for that index into
2904 // the list.
2905 SmallVector<Optional<int>, 8> Mask;
Amara Emerson1abe05c2019-02-21 20:20:16 +00002906 collectShuffleMaskIndices(I, MRI, Mask);
2907 assert(!Mask.empty() && "Expected to find mask indices");
2908
2909 // G_SHUFFLE_VECTOR is weird in that the source operands can be scalars, if
2910 // it's originated from a <1 x T> type. Those should have been lowered into
2911 // G_BUILD_VECTOR earlier.
2912 if (!Src1Ty.isVector() || !Src2Ty.isVector()) {
2913 LLVM_DEBUG(dbgs() << "Could not select a \"scalar\" G_SHUFFLE_VECTOR\n");
2914 return false;
2915 }
2916
2917 unsigned BytesPerElt = DstTy.getElementType().getSizeInBits() / 8;
2918
2919 SmallVector<Constant *, 64> CstIdxs;
Amara Emerson2806fd02019-04-12 21:31:21 +00002920 for (auto &MaybeVal : Mask) {
2921 // For now, we'll just assume any undef indexes are 0. This should be
2922 // optimized in the future, e.g. to select DUP etc.
2923 int Val = MaybeVal.hasValue() ? *MaybeVal : 0;
Amara Emerson1abe05c2019-02-21 20:20:16 +00002924 for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) {
2925 unsigned Offset = Byte + Val * BytesPerElt;
2926 CstIdxs.emplace_back(ConstantInt::get(Type::getInt8Ty(Ctx), Offset));
2927 }
2928 }
2929
Amara Emerson8acb0d92019-03-04 19:16:00 +00002930 MachineIRBuilder MIRBuilder(I);
Amara Emerson1abe05c2019-02-21 20:20:16 +00002931
2932 // Use a constant pool to load the index vector for TBL.
2933 Constant *CPVal = ConstantVector::get(CstIdxs);
Amara Emerson1abe05c2019-02-21 20:20:16 +00002934 MachineInstr *IndexLoad = emitLoadFromConstantPool(CPVal, MIRBuilder);
2935 if (!IndexLoad) {
2936 LLVM_DEBUG(dbgs() << "Could not load from a constant pool");
2937 return false;
2938 }
2939
Amara Emerson8acb0d92019-03-04 19:16:00 +00002940 if (DstTy.getSizeInBits() != 128) {
2941 assert(DstTy.getSizeInBits() == 64 && "Unexpected shuffle result ty");
2942 // This case can be done with TBL1.
Amara Emerson2ff22982019-03-14 22:48:15 +00002943 MachineInstr *Concat = emitVectorConcat(None, Src1Reg, Src2Reg, MIRBuilder);
Amara Emerson8acb0d92019-03-04 19:16:00 +00002944 if (!Concat) {
2945 LLVM_DEBUG(dbgs() << "Could not do vector concat for tbl1");
2946 return false;
2947 }
2948
2949 // The constant pool load will be 64 bits, so we need to convert to an FPR128 reg.
2950 IndexLoad =
2951 emitScalarToVector(64, &AArch64::FPR128RegClass,
2952 IndexLoad->getOperand(0).getReg(), MIRBuilder);
2953
2954 auto TBL1 = MIRBuilder.buildInstr(
2955 AArch64::TBLv16i8One, {&AArch64::FPR128RegClass},
2956 {Concat->getOperand(0).getReg(), IndexLoad->getOperand(0).getReg()});
2957 constrainSelectedInstRegOperands(*TBL1, TII, TRI, RBI);
2958
Amara Emerson3739a202019-03-15 21:59:50 +00002959 auto Copy =
Amara Emerson86271782019-03-18 19:20:10 +00002960 MIRBuilder
2961 .buildInstr(TargetOpcode::COPY, {I.getOperand(0).getReg()}, {})
2962 .addReg(TBL1.getReg(0), 0, AArch64::dsub);
Amara Emerson8acb0d92019-03-04 19:16:00 +00002963 RBI.constrainGenericRegister(Copy.getReg(0), AArch64::FPR64RegClass, MRI);
2964 I.eraseFromParent();
2965 return true;
2966 }
2967
Amara Emerson1abe05c2019-02-21 20:20:16 +00002968 // For TBL2 we need to emit a REG_SEQUENCE to tie together two consecutive
2969 // Q registers for regalloc.
2970 auto RegSeq = MIRBuilder
2971 .buildInstr(TargetOpcode::REG_SEQUENCE,
2972 {&AArch64::QQRegClass}, {Src1Reg})
2973 .addImm(AArch64::qsub0)
2974 .addUse(Src2Reg)
2975 .addImm(AArch64::qsub1);
2976
2977 auto TBL2 =
2978 MIRBuilder.buildInstr(AArch64::TBLv16i8Two, {I.getOperand(0).getReg()},
2979 {RegSeq, IndexLoad->getOperand(0).getReg()});
2980 constrainSelectedInstRegOperands(*RegSeq, TII, TRI, RBI);
2981 constrainSelectedInstRegOperands(*TBL2, TII, TRI, RBI);
2982 I.eraseFromParent();
2983 return true;
2984}
2985
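/// Emit a lane insert of \p EltReg into lane \p LaneIdx of \p SrcReg. If
/// \p DstReg is not provided, a new virtual register is created for the
/// result.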
Jessica Paquette16d67a32019-03-13 23:22:23 +00002986MachineInstr *AArch64InstructionSelector::emitLaneInsert(
2987 Optional<unsigned> DstReg, unsigned SrcReg, unsigned EltReg,
2988 unsigned LaneIdx, const RegisterBank &RB,
2989 MachineIRBuilder &MIRBuilder) const {
2990 MachineInstr *InsElt = nullptr;
2991 const TargetRegisterClass *DstRC = &AArch64::FPR128RegClass;
2992 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
2993
2994 // Create a register to define with the insert if one wasn't passed in.
2995 if (!DstReg)
2996 DstReg = MRI.createVirtualRegister(DstRC);
2997
2998 unsigned EltSize = MRI.getType(EltReg).getSizeInBits();
2999 unsigned Opc = getInsertVecEltOpInfo(RB, EltSize).first;
3000
3001 if (RB.getID() == AArch64::FPRRegBankID) {
3002 auto InsSub = emitScalarToVector(EltSize, DstRC, EltReg, MIRBuilder);
3003 InsElt = MIRBuilder.buildInstr(Opc, {*DstReg}, {SrcReg})
3004 .addImm(LaneIdx)
3005 .addUse(InsSub->getOperand(0).getReg())
3006 .addImm(0);
3007 } else {
3008 InsElt = MIRBuilder.buildInstr(Opc, {*DstReg}, {SrcReg})
3009 .addImm(LaneIdx)
3010 .addUse(EltReg);
3011 }
3012
3013 constrainSelectedInstRegOperands(*InsElt, TII, TRI, RBI);
3014 return InsElt;
3015}
3016
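/// Select a G_INSERT_VECTOR_ELT whose lane index is a constant.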
Jessica Paquette5aff1f42019-03-14 18:01:30 +00003017bool AArch64InstructionSelector::selectInsertElt(
3018 MachineInstr &I, MachineRegisterInfo &MRI) const {
3019 assert(I.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT);
3020
3021 // Get information on the destination.
3022 unsigned DstReg = I.getOperand(0).getReg();
3023 const LLT DstTy = MRI.getType(DstReg);
Jessica Paquetted3ffd472019-03-29 21:39:36 +00003024 unsigned VecSize = DstTy.getSizeInBits();
Jessica Paquette5aff1f42019-03-14 18:01:30 +00003025
3026 // Get information on the element we want to insert into the destination.
3027 unsigned EltReg = I.getOperand(2).getReg();
3028 const LLT EltTy = MRI.getType(EltReg);
3029 unsigned EltSize = EltTy.getSizeInBits();
3030 if (EltSize < 16 || EltSize > 64)
3031 return false; // Don't support all element types yet.
3032
3033 // Find the definition of the index. Bail out if it's not defined by a
3034 // G_CONSTANT.
3035 unsigned IdxReg = I.getOperand(3).getReg();
Jessica Paquette76f64b62019-04-26 21:53:13 +00003036 auto VRegAndVal = getConstantVRegValWithLookThrough(IdxReg, MRI);
3037 if (!VRegAndVal)
Jessica Paquette5aff1f42019-03-14 18:01:30 +00003038 return false;
Jessica Paquette76f64b62019-04-26 21:53:13 +00003039 unsigned LaneIdx = VRegAndVal->Value;
Jessica Paquette5aff1f42019-03-14 18:01:30 +00003040
3041 // Perform the lane insert.
3042 unsigned SrcReg = I.getOperand(1).getReg();
3043 const RegisterBank &EltRB = *RBI.getRegBank(EltReg, MRI, TRI);
3044 MachineIRBuilder MIRBuilder(I);
Jessica Paquetted3ffd472019-03-29 21:39:36 +00003045
3046 if (VecSize < 128) {
3047 // If the vector we're inserting into is smaller than 128 bits, widen it
3048 // to 128 to do the insert.
3049 MachineInstr *ScalarToVec = emitScalarToVector(
3050 VecSize, &AArch64::FPR128RegClass, SrcReg, MIRBuilder);
3051 if (!ScalarToVec)
3052 return false;
3053 SrcReg = ScalarToVec->getOperand(0).getReg();
3054 }
3055
3056 // Create an insert into a new FPR128 register.
3057 // Note that if our vector is already 128 bits, we end up emitting an extra
3058 // register.
3059 MachineInstr *InsMI =
3060 emitLaneInsert(None, SrcReg, EltReg, LaneIdx, EltRB, MIRBuilder);
3061
3062 if (VecSize < 128) {
3063 // If we had to widen to perform the insert, then we have to demote back to
3064 // the original size to get the result we want.
3065 unsigned DemoteVec = InsMI->getOperand(0).getReg();
3066 const TargetRegisterClass *RC =
3067 getMinClassForRegBank(*RBI.getRegBank(DemoteVec, MRI, TRI), VecSize);
3068 if (RC != &AArch64::FPR32RegClass && RC != &AArch64::FPR64RegClass) {
3069 LLVM_DEBUG(dbgs() << "Unsupported register class!\n");
3070 return false;
3071 }
3072 unsigned SubReg = 0;
3073 if (!getSubRegForClass(RC, TRI, SubReg))
3074 return false;
3075 if (SubReg != AArch64::ssub && SubReg != AArch64::dsub) {
3076 LLVM_DEBUG(dbgs() << "Unsupported destination size! (" << VecSize
3077 << ")\n");
3078 return false;
3079 }
3080 MIRBuilder.buildInstr(TargetOpcode::COPY, {DstReg}, {})
3081 .addReg(DemoteVec, 0, SubReg);
3082 RBI.constrainGenericRegister(DstReg, *RC, MRI);
3083 } else {
3084 // No widening needed.
3085 InsMI->getOperand(0).setReg(DstReg);
3086 constrainSelectedInstRegOperands(*InsMI, TII, TRI, RBI);
3087 }
3088
Jessica Paquette5aff1f42019-03-14 18:01:30 +00003089 I.eraseFromParent();
3090 return true;
3091}
3092
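/// Select a G_BUILD_VECTOR as a sequence of lane inserts into an FPR128
/// register.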
Amara Emerson5ec14602018-12-10 18:44:58 +00003093bool AArch64InstructionSelector::selectBuildVector(
3094 MachineInstr &I, MachineRegisterInfo &MRI) const {
3095 assert(I.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
3096 // Until we port more of the optimized selections, for now just use a vector
3097 // insert sequence.
3098 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
3099 const LLT EltTy = MRI.getType(I.getOperand(1).getReg());
3100 unsigned EltSize = EltTy.getSizeInBits();
Jessica Paquette245047d2019-01-24 22:00:41 +00003101 if (EltSize < 16 || EltSize > 64)
Amara Emerson5ec14602018-12-10 18:44:58 +00003102 return false; // Don't support all element types yet.
3103 const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI);
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00003104 MachineIRBuilder MIRBuilder(I);
Jessica Paquette245047d2019-01-24 22:00:41 +00003105
3106 const TargetRegisterClass *DstRC = &AArch64::FPR128RegClass;
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00003107 MachineInstr *ScalarToVec =
Amara Emerson8acb0d92019-03-04 19:16:00 +00003108 emitScalarToVector(DstTy.getElementType().getSizeInBits(), DstRC,
3109 I.getOperand(1).getReg(), MIRBuilder);
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00003110 if (!ScalarToVec)
Jessica Paquette245047d2019-01-24 22:00:41 +00003111 return false;
3112
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00003113 unsigned DstVec = ScalarToVec->getOperand(0).getReg();
Jessica Paquette245047d2019-01-24 22:00:41 +00003114 unsigned DstSize = DstTy.getSizeInBits();
3115
3116 // Keep track of the last MI we inserted. Later on, we might be able to save
3117 // a copy using it.
3118 MachineInstr *PrevMI = nullptr;
3119 for (unsigned i = 2, e = DstSize / EltSize + 1; i < e; ++i) {
Jessica Paquette16d67a32019-03-13 23:22:23 +00003120 // Note that if we don't do a subregister copy, we can end up making an
3121 // extra register.
3122 PrevMI = &*emitLaneInsert(None, DstVec, I.getOperand(i).getReg(), i - 1, RB,
3123 MIRBuilder);
3124 DstVec = PrevMI->getOperand(0).getReg();
Amara Emerson5ec14602018-12-10 18:44:58 +00003125 }
Jessica Paquette245047d2019-01-24 22:00:41 +00003126
3127 // If DstTy's size in bits is less than 128, then emit a subregister copy
3128 // from DstVec to the last register we've defined.
3129 if (DstSize < 128) {
Jessica Paquette85ace622019-03-13 23:29:54 +00003130 // Force this to be FPR using the destination vector.
3131 const TargetRegisterClass *RC =
3132 getMinClassForRegBank(*RBI.getRegBank(DstVec, MRI, TRI), DstSize);
Jessica Paquette245047d2019-01-24 22:00:41 +00003133 if (!RC)
3134 return false;
Jessica Paquette85ace622019-03-13 23:29:54 +00003135 if (RC != &AArch64::FPR32RegClass && RC != &AArch64::FPR64RegClass) {
3136 LLVM_DEBUG(dbgs() << "Unsupported register class!\n");
3137 return false;
3138 }
3139
3140 unsigned SubReg = 0;
3141 if (!getSubRegForClass(RC, TRI, SubReg))
3142 return false;
3143 if (SubReg != AArch64::ssub && SubReg != AArch64::dsub) {
3144 LLVM_DEBUG(dbgs() << "Unsupported destination size! (" << DstSize
3145 << ")\n");
3146 return false;
3147 }
Jessica Paquette245047d2019-01-24 22:00:41 +00003148
3149 unsigned Reg = MRI.createVirtualRegister(RC);
3150 unsigned DstReg = I.getOperand(0).getReg();
3151
Amara Emerson86271782019-03-18 19:20:10 +00003152 MIRBuilder.buildInstr(TargetOpcode::COPY, {DstReg}, {})
3153 .addReg(DstVec, 0, SubReg);
Jessica Paquette245047d2019-01-24 22:00:41 +00003154 MachineOperand &RegOp = I.getOperand(1);
3155 RegOp.setReg(Reg);
3156 RBI.constrainGenericRegister(DstReg, *RC, MRI);
3157 } else {
3158 // We don't need a subregister copy. Save a copy by re-using the
3159 // destination register on the final insert.
3160 assert(PrevMI && "PrevMI was null?");
3161 PrevMI->getOperand(0).setReg(I.getOperand(0).getReg());
3162 constrainSelectedInstRegOperands(*PrevMI, TII, TRI, RBI);
3163 }
3164
Amara Emerson5ec14602018-12-10 18:44:58 +00003165 I.eraseFromParent();
3166 return true;
3167}
3168
Jessica Paquette7f6fe7c2019-04-29 20:58:17 +00003169/// Helper function to find an intrinsic ID on a MachineInstr. Returns the
3170/// ID if it exists, and 0 otherwise.
3171static unsigned findIntrinsicID(MachineInstr &I) {
3172 auto IntrinOp = find_if(I.operands(), [&](const MachineOperand &Op) {
3173 return Op.isIntrinsicID();
3174 });
3175 if (IntrinOp == I.operands_end())
3176 return 0;
3177 return IntrinOp->getIntrinsicID();
3178}
3179
Jessica Paquette22c62152019-04-02 19:57:26 +00003180/// Helper function to emit the correct opcode for a llvm.aarch64.stlxr
3181/// intrinsic.
3182static unsigned getStlxrOpcode(unsigned NumBytesToStore) {
3183 switch (NumBytesToStore) {
3184 // TODO: 1, 2, and 4 byte stores.
3185 case 8:
3186 return AArch64::STLXRX;
3187 default:
3188 LLVM_DEBUG(dbgs() << "Unexpected number of bytes to store! ("
3189 << NumBytesToStore << ")\n");
3190 break;
3191 }
3192 return 0;
3193}
3194
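/// Select an intrinsic with side effects. Currently handles llvm.trap and
/// llvm.aarch64.stlxr.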
3195bool AArch64InstructionSelector::selectIntrinsicWithSideEffects(
3196 MachineInstr &I, MachineRegisterInfo &MRI) const {
3197 // Find the intrinsic ID.
Jessica Paquette7f6fe7c2019-04-29 20:58:17 +00003198 unsigned IntrinID = findIntrinsicID(I);
3199 if (!IntrinID)
Jessica Paquette22c62152019-04-02 19:57:26 +00003200 return false;
Jessica Paquette22c62152019-04-02 19:57:26 +00003201 MachineIRBuilder MIRBuilder(I);
3202
3203 // Select the instruction.
3204 switch (IntrinID) {
3205 default:
3206 return false;
3207 case Intrinsic::trap:
3208 MIRBuilder.buildInstr(AArch64::BRK, {}, {}).addImm(1);
3209 break;
3210 case Intrinsic::aarch64_stlxr:
3211 unsigned StatReg = I.getOperand(0).getReg();
3212 assert(RBI.getSizeInBits(StatReg, MRI, TRI) == 32 &&
3213 "Status register must be 32 bits!");
3214 unsigned SrcReg = I.getOperand(2).getReg();
3215
3216 if (RBI.getSizeInBits(SrcReg, MRI, TRI) != 64) {
3217 LLVM_DEBUG(dbgs() << "Only support 64-bit sources right now.\n");
3218 return false;
3219 }
3220
3221 unsigned PtrReg = I.getOperand(3).getReg();
3222 assert(MRI.getType(PtrReg).isPointer() && "Expected pointer operand");
3223
3224 // Expect only one memory operand.
3225 if (!I.hasOneMemOperand())
3226 return false;
3227
3228 const MachineMemOperand *MemOp = *I.memoperands_begin();
3229 unsigned NumBytesToStore = MemOp->getSize();
3230 unsigned Opc = getStlxrOpcode(NumBytesToStore);
3231 if (!Opc)
3232 return false;
3233
3234 auto StoreMI = MIRBuilder.buildInstr(Opc, {StatReg}, {SrcReg, PtrReg});
3235 constrainSelectedInstRegOperands(*StoreMI, TII, TRI, RBI);
3236 }
3237
3238 I.eraseFromParent();
3239 return true;
3240}
3241
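/// Select an intrinsic without side effects. Currently only
/// llvm.aarch64.crypto.sha1h is handled.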
Jessica Paquette7f6fe7c2019-04-29 20:58:17 +00003242bool AArch64InstructionSelector::selectIntrinsic(
3243 MachineInstr &I, MachineRegisterInfo &MRI) const {
3244 unsigned IntrinID = findIntrinsicID(I);
3245 if (!IntrinID)
3246 return false;
3247 MachineIRBuilder MIRBuilder(I);
3248
3249 switch (IntrinID) {
3250 default:
3251 break;
3252 case Intrinsic::aarch64_crypto_sha1h:
3253 unsigned DstReg = I.getOperand(0).getReg();
3254 unsigned SrcReg = I.getOperand(2).getReg();
3255
3256 // FIXME: Should this be an assert?
3257 if (MRI.getType(DstReg).getSizeInBits() != 32 ||
3258 MRI.getType(SrcReg).getSizeInBits() != 32)
3259 return false;
3260
3261 // The operation has to happen on FPRs. Set up some new FPR registers for
3262 // the source and destination if they are on GPRs.
3263 if (RBI.getRegBank(SrcReg, MRI, TRI)->getID() != AArch64::FPRRegBankID) {
3264 SrcReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);
3265 MIRBuilder.buildCopy({SrcReg}, {I.getOperand(2)});
3266
3267 // Make sure the copy ends up getting constrained properly.
3268 RBI.constrainGenericRegister(I.getOperand(2).getReg(),
3269 AArch64::GPR32RegClass, MRI);
3270 }
3271
3272 if (RBI.getRegBank(DstReg, MRI, TRI)->getID() != AArch64::FPRRegBankID)
3273 DstReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);
3274
3275 // Actually insert the instruction.
3276 auto SHA1Inst = MIRBuilder.buildInstr(AArch64::SHA1Hrr, {DstReg}, {SrcReg});
3277 constrainSelectedInstRegOperands(*SHA1Inst, TII, TRI, RBI);
3278
3279 // Did we create a new register for the destination?
3280 if (DstReg != I.getOperand(0).getReg()) {
3281 // Yep. Copy the result of the instruction back into the original
3282 // destination.
3283 MIRBuilder.buildCopy({I.getOperand(0)}, {DstReg});
3284 RBI.constrainGenericRegister(I.getOperand(0).getReg(),
3285 AArch64::GPR32RegClass, MRI);
3286 }
3287
3288 I.eraseFromParent();
3289 return true;
3290 }
3291 return false;
3292}
3293
Daniel Sanders8a4bae92017-03-14 21:32:08 +00003294/// SelectArithImmed - Select an immediate value that can be represented as
3295/// a 12-bit value shifted left by either 0 or 12. If so, return true with
3296/// Val set to the 12-bit value and Shift set to the shifter operand.
Daniel Sanders1e4569f2017-10-20 20:55:29 +00003297InstructionSelector::ComplexRendererFns
Daniel Sanders2deea182017-04-22 15:11:04 +00003298AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
Daniel Sanders8a4bae92017-03-14 21:32:08 +00003299 MachineInstr &MI = *Root.getParent();
3300 MachineBasicBlock &MBB = *MI.getParent();
3301 MachineFunction &MF = *MBB.getParent();
3302 MachineRegisterInfo &MRI = MF.getRegInfo();
3303
3304 // This function is called from the addsub_shifted_imm ComplexPattern,
3305 // which lists [imm] as the list of opcodes it's interested in; however,
3306 // we still need to check whether the operand is actually an immediate
3307 // here because the ComplexPattern opcode list is only used in
3308 // root-level opcode matching.
3309 uint64_t Immed;
3310 if (Root.isImm())
3311 Immed = Root.getImm();
3312 else if (Root.isCImm())
3313 Immed = Root.getCImm()->getZExtValue();
3314 else if (Root.isReg()) {
3315 MachineInstr *Def = MRI.getVRegDef(Root.getReg());
3316 if (Def->getOpcode() != TargetOpcode::G_CONSTANT)
Daniel Sandersdf39cba2017-10-15 18:22:54 +00003317 return None;
Daniel Sanders0e642022017-03-16 18:04:50 +00003318 MachineOperand &Op1 = Def->getOperand(1);
3319 if (!Op1.isCImm() || Op1.getCImm()->getBitWidth() > 64)
Daniel Sandersdf39cba2017-10-15 18:22:54 +00003320 return None;
Daniel Sanders0e642022017-03-16 18:04:50 +00003321 Immed = Op1.getCImm()->getZExtValue();
Daniel Sanders8a4bae92017-03-14 21:32:08 +00003322 } else
Daniel Sandersdf39cba2017-10-15 18:22:54 +00003323 return None;
Daniel Sanders8a4bae92017-03-14 21:32:08 +00003324
3325 unsigned ShiftAmt;
3326
3327 if (Immed >> 12 == 0) {
3328 ShiftAmt = 0;
3329 } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
3330 ShiftAmt = 12;
3331 Immed = Immed >> 12;
3332 } else
Daniel Sandersdf39cba2017-10-15 18:22:54 +00003333 return None;
Daniel Sanders8a4bae92017-03-14 21:32:08 +00003334
3335 unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
Daniel Sandersdf39cba2017-10-15 18:22:54 +00003336 return {{
3337 [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); },
3338 [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); },
3339 }};
Daniel Sanders8a4bae92017-03-14 21:32:08 +00003340}
Daniel Sanders0b5293f2017-04-06 09:49:34 +00003341
Daniel Sandersea8711b2017-10-16 03:36:29 +00003342/// Select a "register plus unscaled signed 9-bit immediate" address. This
3343/// should only match when there is an offset that is not valid for a scaled
3344/// immediate addressing mode. The "Size" argument is the size in bytes of the
3345/// memory reference, which is needed here to know what is valid for a scaled
3346/// immediate.
Daniel Sanders1e4569f2017-10-20 20:55:29 +00003347InstructionSelector::ComplexRendererFns
Daniel Sandersea8711b2017-10-16 03:36:29 +00003348AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root,
3349 unsigned Size) const {
3350 MachineRegisterInfo &MRI =
3351 Root.getParent()->getParent()->getParent()->getRegInfo();
3352
3353 if (!Root.isReg())
3354 return None;
3355
3356 if (!isBaseWithConstantOffset(Root, MRI))
3357 return None;
3358
3359 MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
3360 if (!RootDef)
3361 return None;
3362
3363 MachineOperand &OffImm = RootDef->getOperand(2);
3364 if (!OffImm.isReg())
3365 return None;
3366 MachineInstr *RHS = MRI.getVRegDef(OffImm.getReg());
3367 if (!RHS || RHS->getOpcode() != TargetOpcode::G_CONSTANT)
3368 return None;
3369 int64_t RHSC;
3370 MachineOperand &RHSOp1 = RHS->getOperand(1);
3371 if (!RHSOp1.isCImm() || RHSOp1.getCImm()->getBitWidth() > 64)
3372 return None;
3373 RHSC = RHSOp1.getCImm()->getSExtValue();
3374
3375 // If the offset is valid as a scaled immediate, don't match here.
3376 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Log2_32(Size)))
3377 return None;
3378 if (RHSC >= -256 && RHSC < 256) {
3379 MachineOperand &Base = RootDef->getOperand(1);
3380 return {{
3381 [=](MachineInstrBuilder &MIB) { MIB.add(Base); },
3382 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
3383 }};
3384 }
3385 return None;
3386}
3387
3388/// Select a "register plus scaled unsigned 12-bit immediate" address. The
3389/// "Size" argument is the size in bytes of the memory reference, which
3390/// determines the scale.
Daniel Sanders1e4569f2017-10-20 20:55:29 +00003391InstructionSelector::ComplexRendererFns
Daniel Sandersea8711b2017-10-16 03:36:29 +00003392AArch64InstructionSelector::selectAddrModeIndexed(MachineOperand &Root,
3393 unsigned Size) const {
3394 MachineRegisterInfo &MRI =
3395 Root.getParent()->getParent()->getParent()->getRegInfo();
3396
3397 if (!Root.isReg())
3398 return None;
3399
3400 MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
3401 if (!RootDef)
3402 return None;
3403
3404 if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
3405 return {{
3406 [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
3407 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
3408 }};
3409 }
3410
3411 if (isBaseWithConstantOffset(Root, MRI)) {
3412 MachineOperand &LHS = RootDef->getOperand(1);
3413 MachineOperand &RHS = RootDef->getOperand(2);
3414 MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
3415 MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
3416 if (LHSDef && RHSDef) {
3417 int64_t RHSC = (int64_t)RHSDef->getOperand(1).getCImm()->getZExtValue();
3418 unsigned Scale = Log2_32(Size);
3419 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
3420 if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
Daniel Sanders01805b62017-10-16 05:39:30 +00003421 return {{
3422 [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
3423 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
3424 }};
3425
Daniel Sandersea8711b2017-10-16 03:36:29 +00003426 return {{
3427 [=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
3428 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
3429 }};
3430 }
3431 }
3432 }
3433
3434 // Before falling back to our general case, check if the unscaled
3435 // instructions can handle this. If so, that's preferable.
3436 if (selectAddrModeUnscaled(Root, Size).hasValue())
3437 return None;
3438
3439 return {{
3440 [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3441 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
3442 }};
3443}
3444
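/// Custom renderer that adds the value of a G_CONSTANT as a truncated
/// immediate operand.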
Volkan Kelesf7f25682018-01-16 18:44:05 +00003445void AArch64InstructionSelector::renderTruncImm(MachineInstrBuilder &MIB,
3446 const MachineInstr &MI) const {
3447 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
3448 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
3449 Optional<int64_t> CstVal = getConstantVRegVal(MI.getOperand(0).getReg(), MRI);
3450 assert(CstVal && "Expected constant value");
3451 MIB.addImm(CstVal.getValue());
3452}
3453
Daniel Sanders0b5293f2017-04-06 09:49:34 +00003454namespace llvm {
3455InstructionSelector *
3456createAArch64InstructionSelector(const AArch64TargetMachine &TM,
3457 AArch64Subtarget &Subtarget,
3458 AArch64RegisterBankInfo &RBI) {
3459 return new AArch64InstructionSelector(TM, Subtarget, RBI);
3460}
3461}