//===- AArch64InstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "aarch64-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class AArch64InstructionSelector : public InstructionSelector {
public:
  AArch64InstructionSelector(const AArch64TargetMachine &TM,
                             const AArch64Subtarget &STI,
                             const AArch64RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
                          MachineRegisterInfo &MRI) const;
  bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

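  /// Match an operand suitable for the arithmetic-immediate forms of ADD/SUB
  /// and friends: a 12-bit unsigned immediate, optionally shifted left by 12
  /// (e.g. 0xfff or 0xfff000 match; 0x1001 does not).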
  ComplexRendererFns selectArithImmed(MachineOperand &Root) const;

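  /// The two families of reg+imm addressing-mode helpers below mirror the two
  /// load/store encodings on AArch64: "unscaled" forms (LDUR/STUR and friends)
  /// take a signed 9-bit byte offset, while "indexed" forms (LDRXui and
  /// friends) take an unsigned 12-bit offset scaled by the access size.
  /// \p Size is the access size in bytes.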
  ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root,
                                            unsigned Size) const;

  ComplexRendererFns selectAddrModeUnscaled8(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 1);
  }
  ComplexRendererFns selectAddrModeUnscaled16(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 2);
  }
  ComplexRendererFns selectAddrModeUnscaled32(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 4);
  }
  ComplexRendererFns selectAddrModeUnscaled64(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 8);
  }
  ComplexRendererFns selectAddrModeUnscaled128(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 16);
  }

  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root,
                                           unsigned Size) const;
  template <int Width>
  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root) const {
    return selectAddrModeIndexed(Root, Width / 8);
  }

  void renderTruncImm(MachineInstrBuilder &MIB, const MachineInstr &MI) const;

  const AArch64TargetMachine &TM;
  const AArch64Subtarget &STI;
  const AArch64InstrInfo &TII;
  const AArch64RegisterInfo &TRI;
  const AArch64RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

// We declare the temporaries used by selectImpl() in the class to minimize the
// cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

AArch64InstructionSelector::AArch64InstructionSelector(
    const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
    const AArch64RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
static const TargetRegisterClass *
getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB,
                         const RegisterBankInfo &RBI,
                         bool GetAllRegSet = false) {
  if (RB.getID() == AArch64::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 32)
      return GetAllRegSet ? &AArch64::GPR32allRegClass
                          : &AArch64::GPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return GetAllRegSet ? &AArch64::GPR64allRegClass
                          : &AArch64::GPR64RegClass;
    return nullptr;
  }

  if (RB.getID() == AArch64::FPRRegBankID) {
    if (Ty.getSizeInBits() <= 16)
      return &AArch64::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &AArch64::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &AArch64::FPR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return &AArch64::FPR128RegClass;
    return nullptr;
  }

  return nullptr;
}

/// Check whether \p I is a currently unsupported binary operation:
/// - it has an unsized type
/// - an operand is not a vreg
/// - its operands are not all in the same bank
/// These are checks that should someday live in the verifier, but right now,
/// they are mostly limitations of the AArch64 selector.
static bool unsupportedBinOp(const MachineInstr &I,
                             const AArch64RegisterBankInfo &RBI,
                             const MachineRegisterInfo &MRI,
                             const AArch64RegisterInfo &TRI) {
  LLT Ty = MRI.getType(I.getOperand(0).getReg());
  if (!Ty.isValid()) {
    DEBUG(dbgs() << "Generic binop register should be typed\n");
    return true;
  }

  const RegisterBank *PrevOpBank = nullptr;
  for (auto &MO : I.operands()) {
    // FIXME: Support non-register operands.
    if (!MO.isReg()) {
      DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
      return true;
    }

    // FIXME: Can generic operations have physical register operands? If
    // so, this will need to be taught about that, and we'll need to get the
    // bank out of the minimal class for the register.
    // Either way, this needs to be documented (and possibly verified).
    if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      DEBUG(dbgs() << "Generic inst has physical register operand\n");
      return true;
    }

    const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
    if (!OpBank) {
      DEBUG(dbgs() << "Generic register has no bank or class\n");
      return true;
    }

    if (PrevOpBank && OpBank != PrevOpBank) {
      DEBUG(dbgs() << "Generic inst operands have different banks\n");
      return true;
    }
    PrevOpBank = OpBank;
  }
  return false;
}

/// Select the AArch64 opcode for the basic binary operation \p GenericOpc
/// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID
/// and of size \p OpSize.
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
                               unsigned OpSize) {
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    if (OpSize == 32) {
      switch (GenericOpc) {
      case TargetOpcode::G_SHL:
        return AArch64::LSLVWr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVWr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVWr;
      default:
        return GenericOpc;
      }
    } else if (OpSize == 64) {
      switch (GenericOpc) {
      case TargetOpcode::G_GEP:
        return AArch64::ADDXrr;
      case TargetOpcode::G_SHL:
        return AArch64::LSLVXr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVXr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVXr;
      default:
        return GenericOpc;
      }
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDSrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBSrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULSrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVSrr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDDrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBDrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULDrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVDrr;
      case TargetOpcode::G_OR:
        return AArch64::ORRv8i8;
      default:
        return GenericOpc;
      }
    }
    break;
  }
  return GenericOpc;
}

/// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc,
/// appropriate for the (value) register bank \p RegBankID and of memory access
/// size \p OpSize. This returns the variant with the base+unsigned-immediate
/// addressing mode (e.g., LDRXui).
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
                                    unsigned OpSize) {
  const bool isStore = GenericOpc == TargetOpcode::G_STORE;
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBBui : AArch64::LDRBBui;
    case 16:
      return isStore ? AArch64::STRHHui : AArch64::LDRHHui;
    case 32:
      return isStore ? AArch64::STRWui : AArch64::LDRWui;
    case 64:
      return isStore ? AArch64::STRXui : AArch64::LDRXui;
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBui : AArch64::LDRBui;
    case 16:
      return isStore ? AArch64::STRHui : AArch64::LDRHui;
    case 32:
      return isStore ? AArch64::STRSui : AArch64::LDRSui;
    case 64:
      return isStore ? AArch64::STRDui : AArch64::LDRDui;
    }
    break;
  }
  return GenericOpc;
}

static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {

  unsigned DstReg = I.getOperand(0).getReg();
  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");
    return true;
  }

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
  (void)DstSize;
  unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  (void)SrcSize;
  assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
         "No phys reg on generic operators");
  assert(
      (DstSize == SrcSize ||
       // Copies are a means to set up initial types, so the number of
       // bits may not exactly match.
       (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
        DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI)) ||
       // Copies are a means to move bits around; as long as we are
       // on the same register class, that's fine. Otherwise, that
       // means we need some SUBREG_TO_REG or AND & co.
       (((DstSize + 31) / 32 == (SrcSize + 31) / 32) && DstSize > SrcSize)) &&
      "Copy with different width?!");
  assert((DstSize <= 64 || RegBank.getID() == AArch64::FPRRegBankID) &&
         "GPRs cannot get more than 64-bit width values");

  const TargetRegisterClass *RC = getRegClassForTypeOnBank(
      MRI.getType(DstReg), RegBank, RBI, /* GetAllRegSet */ true);
  if (!RC) {
    DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n');
    return false;
  }

  if (!TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
    const RegClassOrRegBank &RegClassOrBank =
        MRI.getRegClassOrRegBank(SrcReg);

    const TargetRegisterClass *SrcRC =
        RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
    const RegisterBank *RB = nullptr;
    if (!SrcRC) {
      RB = RegClassOrBank.get<const RegisterBank *>();
      SrcRC = getRegClassForTypeOnBank(MRI.getType(SrcReg), *RB, RBI, true);
    }
    // Copies from fpr16 to gpr32 need to use SUBREG_TO_REG.
    if (RC == &AArch64::GPR32allRegClass && SrcRC == &AArch64::FPR16RegClass) {
      unsigned PromoteReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(AArch64::SUBREG_TO_REG))
          .addDef(PromoteReg)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::hsub);
      MachineOperand &RegOp = I.getOperand(1);
      RegOp.setReg(PromoteReg);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                 << " operand\n");
    return false;
  }
  I.setDesc(TII.get(AArch64::COPY));
  return true;
}

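// A note on the opcode names returned below: the backend encodes the operand
// kinds into the name, e.g. SCVTFUWSri converts a 32-bit GPR (W) to a
// single-precision FPR (S), while FCVTZSUXDr converts a double (D) to a
// signed 64-bit GPR (X).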
static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy) {
  if (!DstTy.isScalar() || !SrcTy.isScalar())
    return GenericOpc;

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();

  switch (DstSize) {
  case 32:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  case 64:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  default:
    return GenericOpc;
  }
  return GenericOpc;
}

static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return AArch64CC::NE;
  case CmpInst::ICMP_EQ:
    return AArch64CC::EQ;
  case CmpInst::ICMP_SGT:
    return AArch64CC::GT;
  case CmpInst::ICMP_SGE:
    return AArch64CC::GE;
  case CmpInst::ICMP_SLT:
    return AArch64CC::LT;
  case CmpInst::ICMP_SLE:
    return AArch64CC::LE;
  case CmpInst::ICMP_UGT:
    return AArch64CC::HI;
  case CmpInst::ICMP_UGE:
    return AArch64CC::HS;
  case CmpInst::ICMP_ULT:
    return AArch64CC::LO;
  case CmpInst::ICMP_ULE:
    return AArch64CC::LS;
  }
}

static void changeFCMPPredToAArch64CC(CmpInst::Predicate P,
                                      AArch64CC::CondCode &CondCode,
                                      AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (P) {
  default:
    llvm_unreachable("Unknown FP condition!");
  case CmpInst::FCMP_OEQ:
    CondCode = AArch64CC::EQ;
    break;
  case CmpInst::FCMP_OGT:
    CondCode = AArch64CC::GT;
    break;
  case CmpInst::FCMP_OGE:
    CondCode = AArch64CC::GE;
    break;
  case CmpInst::FCMP_OLT:
    CondCode = AArch64CC::MI;
    break;
  case CmpInst::FCMP_OLE:
    CondCode = AArch64CC::LS;
    break;
  case CmpInst::FCMP_ONE:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GT;
    break;
  case CmpInst::FCMP_ORD:
    CondCode = AArch64CC::VC;
    break;
  case CmpInst::FCMP_UNO:
    CondCode = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UEQ:
    CondCode = AArch64CC::EQ;
    CondCode2 = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UGT:
    CondCode = AArch64CC::HI;
    break;
  case CmpInst::FCMP_UGE:
    CondCode = AArch64CC::PL;
    break;
  case CmpInst::FCMP_ULT:
    CondCode = AArch64CC::LT;
    break;
  case CmpInst::FCMP_ULE:
    CondCode = AArch64CC::LE;
    break;
  case CmpInst::FCMP_UNE:
    CondCode = AArch64CC::NE;
    break;
  }
}

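// Try to fold a G_ICMP against zero feeding a G_BRCOND into a single
// compare-and-branch: e.g. "%c = G_ICMP intpred(eq), %x, 0; G_BRCOND %c, %bb"
// becomes "CBZW %x, %bb" (CBNZ/CBZX/CBNZX for the other predicate/width
// combinations).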
bool AArch64InstructionSelector::selectCompareBranch(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
  MachineInstr *CCMI = MRI.getVRegDef(CondReg);
  if (CCMI->getOpcode() == TargetOpcode::G_TRUNC)
    CCMI = MRI.getVRegDef(CCMI->getOperand(1).getReg());
  if (CCMI->getOpcode() != TargetOpcode::G_ICMP)
    return false;

  unsigned LHS = CCMI->getOperand(2).getReg();
  unsigned RHS = CCMI->getOperand(3).getReg();
  if (!getConstantVRegVal(RHS, MRI))
    std::swap(RHS, LHS);

  const auto RHSImm = getConstantVRegVal(RHS, MRI);
  if (!RHSImm || *RHSImm != 0)
    return false;

  const RegisterBank &RB = *RBI.getRegBank(LHS, MRI, TRI);
  if (RB.getID() != AArch64::GPRRegBankID)
    return false;

  const auto Pred = (CmpInst::Predicate)CCMI->getOperand(1).getPredicate();
  if (Pred != CmpInst::ICMP_NE && Pred != CmpInst::ICMP_EQ)
    return false;

  const unsigned CmpWidth = MRI.getType(LHS).getSizeInBits();
  unsigned CBOpc = 0;
  if (CmpWidth <= 32)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZW : AArch64::CBNZW);
  else if (CmpWidth == 64)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZX : AArch64::CBNZX);
  else
    return false;

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CBOpc))
      .addUse(LHS)
      .addMBB(DestMBB)
      .constrainAllUses(TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::selectVaStartAAPCS(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  return false;
}

bool AArch64InstructionSelector::selectVaStartDarwin(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  unsigned ListReg = I.getOperand(0).getReg();

  unsigned ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);

  auto MIB =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
          .addDef(ArgsAddrReg)
          .addFrameIndex(FuncInfo->getVarArgsStackIndex())
          .addImm(0)
          .addImm(0);

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);

  MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui))
            .addUse(ArgsAddrReg)
            .addUse(ListReg)
            .addImm(0)
            .addMemOperand(*I.memoperands_begin());

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::select(MachineInstr &I,
                                        CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  // G_PHI requires the same handling as PHI.
  if (!isPreISelGenericOpcode(Opcode) || Opcode == TargetOpcode::G_PHI) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);

    if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) {
      const unsigned DefReg = I.getOperand(0).getReg();
      const LLT DefTy = MRI.getType(DefReg);

      const TargetRegisterClass *DefRC = nullptr;
      if (TargetRegisterInfo::isPhysicalRegister(DefReg)) {
        DefRC = TRI.getRegClass(DefReg);
      } else {
        const RegClassOrRegBank &RegClassOrBank =
            MRI.getRegClassOrRegBank(DefReg);

        DefRC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
        if (!DefRC) {
          if (!DefTy.isValid()) {
            DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
            return false;
          }
          const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
          DefRC = getRegClassForTypeOnBank(DefTy, RB, RBI);
          if (!DefRC) {
            DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
            return false;
          }
        }
      }
      I.setDesc(TII.get(TargetOpcode::PHI));

      return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
    }

    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    DEBUG(dbgs() << "Generic instruction has unexpected implicit operands\n");
    return false;
  }

  if (selectImpl(I, CoverageInfo))
    return true;

  LLT Ty =
      I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{};

  switch (Opcode) {
  case TargetOpcode::G_BRCOND: {
    if (Ty.getSizeInBits() > 32) {
      // We shouldn't need this on AArch64, but it would be implemented as an
      // EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the
      // bit being tested is < 32.
      DEBUG(dbgs() << "G_BRCOND has type: " << Ty
                   << ", expected at most 32-bits");
      return false;
    }

    const unsigned CondReg = I.getOperand(0).getReg();
    MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

    if (selectCompareBranch(I, MF, MRI))
      return true;

    auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
                   .addUse(CondReg)
                   .addImm(/*bit offset=*/0)
                   .addMBB(DestMBB);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
  }

  case TargetOpcode::G_BRINDIRECT: {
    I.setDesc(TII.get(AArch64::BR));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_CONSTANT: {
    const bool isFP = Opcode == TargetOpcode::G_FCONSTANT;

    const LLT s32 = LLT::scalar(32);
    const LLT s64 = LLT::scalar(64);
    const LLT p0 = LLT::pointer(0, 64);

    const unsigned DefReg = I.getOperand(0).getReg();
    const LLT DefTy = MRI.getType(DefReg);
    const unsigned DefSize = DefTy.getSizeInBits();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    // FIXME: Redundant check, but even less readable when factored out.
    if (isFP) {
      if (Ty != s32 && Ty != s64) {
        DEBUG(dbgs() << "Unable to materialize FP " << Ty
                     << " constant, expected: " << s32 << " or " << s64
                     << '\n');
        return false;
      }

      if (RB.getID() != AArch64::FPRRegBankID) {
        DEBUG(dbgs() << "Unable to materialize FP " << Ty
                     << " constant on bank: " << RB << ", expected: FPR\n");
        return false;
      }

      // The case when we have 0.0 is covered by tablegen. Reject it here so we
      // can be sure tablegen works correctly and isn't rescued by this code.
      if (I.getOperand(1).getFPImm()->getValueAPF().isExactlyValue(0.0))
        return false;
    } else {
      // s32 and s64 are covered by tablegen.
      if (Ty != p0) {
        DEBUG(dbgs() << "Unable to materialize integer " << Ty
                     << " constant, expected: " << s32 << ", " << s64 << ", or "
                     << p0 << '\n');
        return false;
      }

      if (RB.getID() != AArch64::GPRRegBankID) {
        DEBUG(dbgs() << "Unable to materialize integer " << Ty
                     << " constant on bank: " << RB << ", expected: GPR\n");
        return false;
      }
    }

    const unsigned MovOpc =
        DefSize == 32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;

    I.setDesc(TII.get(MovOpc));

    if (isFP) {
      const TargetRegisterClass &GPRRC =
          DefSize == 32 ? AArch64::GPR32RegClass : AArch64::GPR64RegClass;
      const TargetRegisterClass &FPRRC =
          DefSize == 32 ? AArch64::FPR32RegClass : AArch64::FPR64RegClass;

      const unsigned DefGPRReg = MRI.createVirtualRegister(&GPRRC);
      MachineOperand &RegOp = I.getOperand(0);
      RegOp.setReg(DefGPRReg);

      BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
              TII.get(AArch64::COPY))
          .addDef(DefReg)
          .addUse(DefGPRReg);

      if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
        DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
        return false;
      }

      MachineOperand &ImmOp = I.getOperand(1);
      // FIXME: Is going through int64_t always correct?
      ImmOp.ChangeToImmediate(
          ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
    } else if (I.getOperand(1).isCImm()) {
      uint64_t Val = I.getOperand(1).getCImm()->getZExtValue();
      I.getOperand(1).ChangeToImmediate(Val);
    } else if (I.getOperand(1).isImm()) {
      uint64_t Val = I.getOperand(1).getImm();
      I.getOperand(1).ChangeToImmediate(Val);
    }

    constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    return true;
  }
  case TargetOpcode::G_EXTRACT: {
    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    (void)DstTy;
    unsigned SrcSize = SrcTy.getSizeInBits();
    // Larger extracts are vectors, same-size extracts should be something else
    // by now (either split up or simplified to a COPY).
    if (SrcTy.getSizeInBits() > 64 || Ty.getSizeInBits() > 32)
      return false;

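    // UBFM(W|X)ri is an unsigned bitfield extract: with immr == Offset and
    // imms == Offset + Width - 1, it moves bits [imms:immr] of the source
    // into the low bits of the destination and zeroes the rest. The offset
    // (immr) is already operand 2; imms is appended below.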
    I.setDesc(TII.get(SrcSize == 64 ? AArch64::UBFMXri : AArch64::UBFMWri));
    MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() +
                                      Ty.getSizeInBits() - 1);

    if (SrcSize < 64) {
      assert(SrcSize == 32 && DstTy.getSizeInBits() == 16 &&
             "unexpected G_EXTRACT types");
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
            TII.get(AArch64::COPY))
        .addDef(I.getOperand(0).getReg())
        .addUse(DstReg, 0, AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(0).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(0).setReg(DstReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_INSERT: {
    LLT SrcTy = MRI.getType(I.getOperand(2).getReg());
    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    unsigned DstSize = DstTy.getSizeInBits();
    // Larger inserts are vectors, same-size ones should be something else by
    // now (split up or turned into COPYs).
    if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32)
      return false;

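    // BFM(W|X)ri treats immr as a right-rotate amount applied to the source,
    // so immr == (DstSize - LSB) % DstSize rotates the field left into place,
    // and imms == Width - 1 selects how many low bits of the source to
    // insert.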
    I.setDesc(TII.get(DstSize == 64 ? AArch64::BFMXri : AArch64::BFMWri));
    unsigned LSB = I.getOperand(3).getImm();
    unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    I.getOperand(3).setImm((DstSize - LSB) % DstSize);
    MachineInstrBuilder(MF, I).addImm(Width - 1);

    if (DstSize < 64) {
      assert(DstSize == 32 && SrcTy.getSizeInBits() == 16 &&
             "unexpected G_INSERT types");
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    unsigned SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
            TII.get(AArch64::SUBREG_TO_REG))
        .addDef(SrcReg)
        .addImm(0)
        .addUse(I.getOperand(2).getReg())
        .addImm(AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(2).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(2).setReg(SrcReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_FRAME_INDEX: {
    // allocas and G_FRAME_INDEX are only supported in addrspace(0).
    if (Ty != LLT::pointer(0, 64)) {
      DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
                   << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }
    I.setDesc(TII.get(AArch64::ADDXri));

    // MOs for a #0 shifted immediate.
    I.addOperand(MachineOperand::CreateImm(0));
    I.addOperand(MachineOperand::CreateImm(0));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_GLOBAL_VALUE: {
    auto GV = I.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // FIXME: we don't support TLS yet.
      return false;
    }
    unsigned char OpFlags = STI.ClassifyGlobalReference(GV, TM);
    if (OpFlags & AArch64II::MO_GOT) {
      I.setDesc(TII.get(AArch64::LOADgot));
      I.getOperand(1).setTargetFlags(OpFlags);
    } else if (TM.getCodeModel() == CodeModel::Large) {
      // Materialize the global using movz/movk instructions.
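      // The address is built 16 bits at a time: MOVZ sets bits [15:0]
      // (:abs_g0_nc:) and three MOVKs fill in bits [31:16], [47:32] and
      // [63:48] (:abs_g1_nc:, :abs_g2_nc:, :abs_g3:).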
      unsigned MovZDstReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      auto InsertPt = std::next(I.getIterator());
      auto MovZ =
          BuildMI(MBB, InsertPt, I.getDebugLoc(), TII.get(AArch64::MOVZXi))
              .addDef(MovZDstReg);
      MovZ->addOperand(MF, I.getOperand(1));
      MovZ->getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_G0 |
                                         AArch64II::MO_NC);
      MovZ->addOperand(MF, MachineOperand::CreateImm(0));
      constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);

      auto BuildMovK = [&](unsigned SrcReg, unsigned char Flags,
                           unsigned Offset, unsigned ForceDstReg) {
        unsigned DstReg =
            ForceDstReg ? ForceDstReg
                        : MRI.createVirtualRegister(&AArch64::GPR64RegClass);
        auto MovI = BuildMI(MBB, InsertPt, MovZ->getDebugLoc(),
                            TII.get(AArch64::MOVKXi))
                        .addDef(DstReg)
                        .addReg(SrcReg);
        MovI->addOperand(MF, MachineOperand::CreateGA(
                                 GV, MovZ->getOperand(1).getOffset(), Flags));
        MovI->addOperand(MF, MachineOperand::CreateImm(Offset));
        constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI);
        return DstReg;
      };
      unsigned DstReg = BuildMovK(MovZ->getOperand(0).getReg(),
                                  AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0);
      DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0);
      BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg());
      I.eraseFromParent();
      return true;
    } else {
      I.setDesc(TII.get(AArch64::MOVaddr));
      I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
      MachineInstrBuilder MIB(MF, I);
      MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(),
                           OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    LLT MemTy = Ty;
    LLT PtrTy = MRI.getType(I.getOperand(1).getReg());

    if (PtrTy != LLT::pointer(0, 64)) {
      DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
                   << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }

    auto &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }

    // FIXME: PR36018: Volatile loads in some cases are incorrectly selected by
    // folding with an extend. Until we have a G_SEXTLOAD solution bail out if
    // we hit one.
    if (Opcode == TargetOpcode::G_LOAD && MemOp.isVolatile())
      return false;

    const unsigned PtrReg = I.getOperand(1).getReg();
#ifndef NDEBUG
    const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
    // Sanity-check the pointer register.
    assert(PtrRB.getID() == AArch64::GPRRegBankID &&
           "Load/Store pointer operand isn't a GPR");
    assert(MRI.getType(PtrReg).isPointer() &&
           "Load/Store pointer operand isn't a pointer");
#endif

    const unsigned ValReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);

    const unsigned NewOpc =
        selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemTy.getSizeInBits());
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));

    uint64_t Offset = 0;
    auto *PtrMI = MRI.getVRegDef(PtrReg);

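    // The *ui opcodes take an unsigned 12-bit offset scaled by the access
    // size, so a constant GEP offset can only be folded if it is suitably
    // aligned and in range: e.g. for LDRXui (Scale == 3), byte offsets
    // 0..32760 in steps of 8, encoded as Imm / 8.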
    // Try to fold a GEP into our unsigned immediate addressing mode.
    if (PtrMI->getOpcode() == TargetOpcode::G_GEP) {
      if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) {
        int64_t Imm = *COff;
        const unsigned Size = MemTy.getSizeInBits() / 8;
        const unsigned Scale = Log2_32(Size);
        if ((Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)) {
          unsigned Ptr2Reg = PtrMI->getOperand(1).getReg();
          I.getOperand(1).setReg(Ptr2Reg);
          PtrMI = MRI.getVRegDef(Ptr2Reg);
          Offset = Imm / Size;
        }
      }
    }

    // If we haven't folded anything into our addressing mode yet, try to fold
    // a frame index into the base+offset.
    if (!Offset && PtrMI->getOpcode() == TargetOpcode::G_FRAME_INDEX)
      I.getOperand(1).ChangeToFrameIndex(PtrMI->getOperand(1).getIndex());

    I.addOperand(MachineOperand::CreateImm(Offset));

    // If we're storing a 0, use WZR/XZR.
    if (auto CVal = getConstantVRegVal(ValReg, MRI)) {
      if (*CVal == 0 && Opcode == TargetOpcode::G_STORE) {
        if (I.getOpcode() == AArch64::STRWui)
          I.getOperand(0).setReg(AArch64::WZR);
        else if (I.getOpcode() == AArch64::STRXui)
          I.getOperand(0).setReg(AArch64::XZR);
      }
    }

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_SMULH:
  case TargetOpcode::G_UMULH: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    if (RB.getID() != AArch64::GPRRegBankID) {
      DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
      return false;
    }

    if (Ty != LLT::scalar(64)) {
      DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
                   << ", expected: " << LLT::scalar(64) << '\n');
      return false;
    }

    unsigned NewOpc = I.getOpcode() == TargetOpcode::G_SMULH ? AArch64::SMULHrr
                                                             : AArch64::UMULHrr;
    I.setDesc(TII.get(NewOpc));

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:

  case TargetOpcode::G_OR:
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_GEP: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned OpSize = Ty.getSizeInBits();

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));
    // FIXME: Should the type be always reset in setDesc?

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_PTR_MASK: {
    uint64_t Align = I.getOperand(2).getImm();
    if (Align >= 64 || Align == 0)
      return false;

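    // G_PTR_MASK clears the low Align bits of the pointer, e.g. Align == 4
    // yields Mask == ~0xfULL (16-byte alignment). A mask of that shape is
    // always representable as an ANDXri logical immediate.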
    uint64_t Mask = ~((1ULL << Align) - 1);
    I.setDesc(TII.get(AArch64::ANDXri));
    I.getOperand(2).setImm(AArch64_AM::encodeLogicalImmediate(Mask, 64));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC: {
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());

    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

    if (DstRB.getID() != SrcRB.getID()) {
      DEBUG(dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
      return false;
    }

    if (DstRB.getID() == AArch64::GPRRegBankID) {
      const TargetRegisterClass *DstRC =
          getRegClassForTypeOnBank(DstTy, DstRB, RBI);
      if (!DstRC)
        return false;

      const TargetRegisterClass *SrcRC =
          getRegClassForTypeOnBank(SrcTy, SrcRB, RBI);
      if (!SrcRC)
        return false;

      if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
          !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
        DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
        return false;
      }

      if (DstRC == SrcRC) {
        // Nothing to be done
      } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) &&
                 SrcTy == LLT::scalar(64)) {
        llvm_unreachable("TableGen can import this case");
        return false;
      } else if (DstRC == &AArch64::GPR32RegClass &&
                 SrcRC == &AArch64::GPR64RegClass) {
        I.getOperand(1).setSubReg(AArch64::sub_32);
      } else {
        DEBUG(dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
        return false;
      }

      I.setDesc(TII.get(TargetOpcode::COPY));
      return true;
    } else if (DstRB.getID() == AArch64::FPRRegBankID) {
      if (DstTy == LLT::vector(4, 16) && SrcTy == LLT::vector(4, 32)) {
        I.setDesc(TII.get(AArch64::XTNv4i16));
        constrainSelectedInstRegOperands(I, TII, TRI, RBI);
        return true;
      }
    }

    return false;
  }

  case TargetOpcode::G_ANYEXT: {
    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
    if (RBDst.getID() != AArch64::GPRRegBankID) {
      DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst << ", expected: GPR\n");
      return false;
    }

    const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
    if (RBSrc.getID() != AArch64::GPRRegBankID) {
      DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc << ", expected: GPR\n");
      return false;
    }

    const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();

    if (DstSize == 0) {
      DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
      return false;
    }

    if (DstSize != 64 && DstSize > 32) {
      DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
                   << ", expected: 32 or 64\n");
      return false;
    }
    // At this point G_ANYEXT is just like a plain COPY, but we need
    // to explicitly form the 64-bit value if any.
    if (DstSize > 32) {
      unsigned ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
      BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
          .addDef(ExtSrc)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::sub_32);
      I.getOperand(1).setReg(ExtSrc);
    }
    return selectCopy(I, TII, MRI, TRI, RBI);
  }

  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    unsigned Opcode = I.getOpcode();
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
              SrcTy = MRI.getType(I.getOperand(1).getReg());
    const bool isSigned = Opcode == TargetOpcode::G_SEXT;
    const unsigned DefReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    if (RB.getID() != AArch64::GPRRegBankID) {
      DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
                   << ", expected: GPR\n");
      return false;
    }

    MachineInstr *ExtI;
    if (DstTy == LLT::scalar(64)) {
      // FIXME: Can we avoid manually doing this?
      if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
        DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
                     << " operand\n");
        return false;
      }

      const unsigned SrcXReg =
          MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
          .addDef(SrcXReg)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::sub_32);

      const unsigned NewOpc = isSigned ? AArch64::SBFMXri : AArch64::UBFMXri;
      ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
                 .addDef(DefReg)
                 .addUse(SrcXReg)
                 .addImm(0)
                 .addImm(SrcTy.getSizeInBits() - 1);
    } else if (DstTy.isScalar() && DstTy.getSizeInBits() <= 32) {
      const unsigned NewOpc = isSigned ? AArch64::SBFMWri : AArch64::UBFMWri;
      ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
                 .addDef(DefReg)
                 .addUse(SrcReg)
                 .addImm(0)
                 .addImm(SrcTy.getSizeInBits() - 1);
    } else {
      return false;
    }

    constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI: {
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
              SrcTy = MRI.getType(I.getOperand(1).getReg());
    const unsigned NewOpc = selectFPConvOpc(Opcode, DstTy, SrcTy);
    if (NewOpc == Opcode)
      return false;

    I.setDesc(TII.get(NewOpc));
    constrainSelectedInstRegOperands(I, TII, TRI, RBI);

    return true;
  }

  case TargetOpcode::G_INTTOPTR:
    // The importer is currently unable to import pointer types since they
    // didn't exist in SelectionDAG.
    return selectCopy(I, TII, MRI, TRI, RBI);

  case TargetOpcode::G_BITCAST:
    // Imported SelectionDAG rules can handle every bitcast except those that
    // bitcast from a type to the same type. Ideally, these shouldn't occur
    // but we might not run an optimizer that deletes them.
    if (MRI.getType(I.getOperand(0).getReg()) ==
        MRI.getType(I.getOperand(1).getReg()))
      return selectCopy(I, TII, MRI, TRI, RBI);
    return false;

  case TargetOpcode::G_SELECT: {
    if (MRI.getType(I.getOperand(1).getReg()) != LLT::scalar(1)) {
      DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
                   << ", expected: " << LLT::scalar(1) << '\n');
      return false;
    }

    const unsigned CondReg = I.getOperand(1).getReg();
    const unsigned TReg = I.getOperand(2).getReg();
    const unsigned FReg = I.getOperand(3).getReg();

    unsigned CSelOpc = 0;

    if (Ty == LLT::scalar(32)) {
      CSelOpc = AArch64::CSELWr;
    } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
      CSelOpc = AArch64::CSELXr;
    } else {
      return false;
    }

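    // The condition is an s1 value; test its low bit with ANDS #1 to set
    // NZCV, then select on NE (low bit set means "true").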
1312 MachineInstr &TstMI =
1313 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
1314 .addDef(AArch64::WZR)
1315 .addUse(CondReg)
1316 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
1317
1318 MachineInstr &CSelMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CSelOpc))
1319 .addDef(I.getOperand(0).getReg())
1320 .addUse(TReg)
1321 .addUse(FReg)
1322 .addImm(AArch64CC::NE);
1323
1324 constrainSelectedInstRegOperands(TstMI, TII, TRI, RBI);
1325 constrainSelectedInstRegOperands(CSelMI, TII, TRI, RBI);
1326
1327 I.eraseFromParent();
1328 return true;
1329 }
  case TargetOpcode::G_ICMP: {
    if (Ty != LLT::scalar(32)) {
      DEBUG(dbgs() << "G_ICMP result has type: " << Ty
                   << ", expected: " << LLT::scalar(32) << '\n');
      return false;
    }

    unsigned CmpOpc = 0;
    unsigned ZReg = 0;

    LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    if (CmpTy == LLT::scalar(32)) {
      CmpOpc = AArch64::SUBSWrr;
      ZReg = AArch64::WZR;
    } else if (CmpTy == LLT::scalar(64) || CmpTy.isPointer()) {
      CmpOpc = AArch64::SUBSXrr;
      ZReg = AArch64::XZR;
    } else {
      return false;
    }

    // CSINC increments the result by one when the condition code is false,
    // so we have to invert the predicate to get a result of 1 when the
    // original predicate is true.
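    // For example, (icmp eq a, b) becomes SUBS + CSINC with the NE condition:
    // CSINC produces WZR + 1 = 1 exactly when NE does not hold, i.e. when
    // a == b.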
    const AArch64CC::CondCode invCC =
        changeICMPPredToAArch64CC(CmpInst::getInversePredicate(
            (CmpInst::Predicate)I.getOperand(1).getPredicate()));

    MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
                               .addDef(ZReg)
                               .addUse(I.getOperand(2).getReg())
                               .addUse(I.getOperand(3).getReg());

    MachineInstr &CSetMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
             .addDef(I.getOperand(0).getReg())
             .addUse(AArch64::WZR)
             .addUse(AArch64::WZR)
             .addImm(invCC);

    constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_FCMP: {
    if (Ty != LLT::scalar(32)) {
      DEBUG(dbgs() << "G_FCMP result has type: " << Ty
                   << ", expected: " << LLT::scalar(32) << '\n');
      return false;
    }

    unsigned CmpOpc = 0;
    LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    if (CmpTy == LLT::scalar(32)) {
      CmpOpc = AArch64::FCMPSrr;
    } else if (CmpTy == LLT::scalar(64)) {
      CmpOpc = AArch64::FCMPDrr;
    } else {
      return false;
    }

    // FIXME: regbank

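    // Some FP predicates have no single AArch64 condition code equivalent
    // (e.g. "ordered and not equal") and map to two condition codes whose
    // CSINC results are ORed together below; CC2 is AL when one code
    // suffices.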
    AArch64CC::CondCode CC1, CC2;
    changeFCMPPredToAArch64CC(
        (CmpInst::Predicate)I.getOperand(1).getPredicate(), CC1, CC2);

    MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
                               .addUse(I.getOperand(2).getReg())
                               .addUse(I.getOperand(3).getReg());

    const unsigned DefReg = I.getOperand(0).getReg();
    unsigned Def1Reg = DefReg;
    if (CC2 != AArch64CC::AL)
      Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);

    MachineInstr &CSetMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
             .addDef(Def1Reg)
             .addUse(AArch64::WZR)
             .addUse(AArch64::WZR)
             .addImm(getInvertedCondCode(CC1));

    if (CC2 != AArch64CC::AL) {
      unsigned Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
      MachineInstr &CSet2MI =
          *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
               .addDef(Def2Reg)
               .addUse(AArch64::WZR)
               .addUse(AArch64::WZR)
               .addImm(getInvertedCondCode(CC2));
      MachineInstr &OrMI =
          *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ORRWrr))
               .addDef(DefReg)
               .addUse(Def1Reg)
               .addUse(Def2Reg);
      constrainSelectedInstRegOperands(OrMI, TII, TRI, RBI);
      constrainSelectedInstRegOperands(CSet2MI, TII, TRI, RBI);
    }

    constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_VASTART:
    return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI)
                                : selectVaStartAAPCS(I, MF, MRI);
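  // IMPLICIT_DEF itself carries no register class constraints, so derive a
  // class from the destination's type and register bank and constrain the
  // virtual register explicitly.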
  case TargetOpcode::G_IMPLICIT_DEF: {
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    const unsigned DstReg = I.getOperand(0).getReg();
    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const TargetRegisterClass *DstRC =
        getRegClassForTypeOnBank(DstTy, DstRB, RBI);
    RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
    return true;
  }
  }

  return false;
}

/// Select an immediate value that can be represented as a 12-bit value
/// shifted left by either 0 or 12. If so, return the renderers for the
/// 12-bit value and the shift amount; otherwise return None.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
  MachineInstr &MI = *Root.getParent();
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the set of opcodes it is interested in. However,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in root-level
  // opcode matching.
  uint64_t Immed;
  if (Root.isImm())
    Immed = Root.getImm();
  else if (Root.isCImm())
    Immed = Root.getCImm()->getZExtValue();
  else if (Root.isReg()) {
    MachineInstr *Def = MRI.getVRegDef(Root.getReg());
    if (Def->getOpcode() != TargetOpcode::G_CONSTANT)
      return None;
    MachineOperand &Op1 = Def->getOperand(1);
    if (!Op1.isCImm() || Op1.getCImm()->getBitWidth() > 64)
      return None;
    Immed = Op1.getCImm()->getZExtValue();
  } else
    return None;

  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return None;
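  // At this point Immed fits in 12 bits: e.g. an input of 0x555000 has been
  // reduced to Immed = 0x555 with ShiftAmt = 12.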

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); },
  }};
}

/// Select a "register plus unscaled signed 9-bit immediate" address. This
/// should only match when there is an offset that is not valid for a scaled
/// immediate addressing mode. The "Size" argument is the size in bytes of the
/// memory reference, which is needed here to know what is valid for a scaled
/// immediate.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root,
                                                   unsigned Size) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  if (!Root.isReg())
    return None;

  if (!isBaseWithConstantOffset(Root, MRI))
    return None;

  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (!RootDef)
    return None;

  MachineOperand &OffImm = RootDef->getOperand(2);
  if (!OffImm.isReg())
    return None;
  MachineInstr *RHS = MRI.getVRegDef(OffImm.getReg());
  if (!RHS || RHS->getOpcode() != TargetOpcode::G_CONSTANT)
    return None;
  MachineOperand &RHSOp1 = RHS->getOperand(1);
  if (!RHSOp1.isCImm() || RHSOp1.getCImm()->getBitWidth() > 64)
    return None;
  int64_t RHSC = RHSOp1.getCImm()->getSExtValue();

  // If the offset is valid as a scaled immediate, don't match here.
  if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
      RHSC < (0x1000 << Log2_32(Size)))
    return None;
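  // For example, with Size == 4 an offset of 8 is rejected above because the
  // scaled form can encode it, while offsets such as 3 (misaligned) or -17
  // (negative) fall through to the signed 9-bit range check below.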
  if (RHSC >= -256 && RHSC < 256) {
    MachineOperand &Base = RootDef->getOperand(1);
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(Base); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
    }};
  }
  return None;
}

/// Select a "register plus scaled unsigned 12-bit immediate" address. The
/// "Size" argument is the size in bytes of the memory reference, which
/// determines the scale.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeIndexed(MachineOperand &Root,
                                                  unsigned Size) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  if (!Root.isReg())
    return None;

  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (!RootDef)
    return None;

  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

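  // Otherwise, look for a base register plus constant offset and fold the
  // offset into the scaled unsigned 12-bit immediate field, using the frame
  // index operand directly when the base is itself a G_FRAME_INDEX.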
  if (isBaseWithConstantOffset(Root, MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
    if (LHSDef && RHSDef) {
      int64_t RHSC = (int64_t)RHSDef->getOperand(1).getCImm()->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
          return {{
              [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
              [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
          }};

        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
        }};
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (selectAddrModeUnscaled(Root, Size).hasValue())
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
  }};
}

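/// Render a G_CONSTANT's value as a plain immediate operand. This appears to
/// be the custom renderer used by imported patterns that expect a truncated
/// immediate rather than a register.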
void AArch64InstructionSelector::renderTruncImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
  Optional<int64_t> CstVal = getConstantVRegVal(MI.getOperand(0).getReg(), MRI);
  assert(CstVal && "Expected constant value");
  MIB.addImm(CstVal.getValue());
}

namespace llvm {
InstructionSelector *
createAArch64InstructionSelector(const AArch64TargetMachine &TM,
                                 AArch64Subtarget &Subtarget,
                                 AArch64RegisterBankInfo &RBI) {
  return new AArch64InstructionSelector(TM, Subtarget, RBI);
}
}
1623}