//===- AArch64InstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "aarch64-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class AArch64InstructionSelector : public InstructionSelector {
public:
  AArch64InstructionSelector(const AArch64TargetMachine &TM,
                             const AArch64Subtarget &STI,
                             const AArch64RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
                          MachineRegisterInfo &MRI) const;
  bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  // Helper to generate an equivalent of scalar_to_vector into a new register,
  // returned via 'Dst'.
  bool emitScalarToVector(unsigned &Dst, const LLT DstTy,
                          const TargetRegisterClass *DstRC, unsigned Scalar,
                          MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI,
                          MachineRegisterInfo &MRI) const;
  bool selectBuildVector(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;

  ComplexRendererFns selectArithImmed(MachineOperand &Root) const;

  ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root,
                                            unsigned Size) const;

  ComplexRendererFns selectAddrModeUnscaled8(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 1);
  }
  ComplexRendererFns selectAddrModeUnscaled16(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 2);
  }
  ComplexRendererFns selectAddrModeUnscaled32(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 4);
  }
  ComplexRendererFns selectAddrModeUnscaled64(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 8);
  }
  ComplexRendererFns selectAddrModeUnscaled128(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 16);
  }

  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root,
                                           unsigned Size) const;
  template <int Width>
  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root) const {
    return selectAddrModeIndexed(Root, Width / 8);
  }
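
  // A note on usage (illustrative, not from the original source): the
  // imported TableGen patterns instantiate the template above with the access
  // width in bits, so e.g. selectAddrModeIndexed<64> attempts to match Root as
  // a base register plus a scaled unsigned 12-bit immediate suitable for an
  // 8-byte access.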

  void renderTruncImm(MachineInstrBuilder &MIB, const MachineInstr &MI) const;

  // Materialize a GlobalValue or BlockAddress using a movz+movk sequence.
  void materializeLargeCMVal(MachineInstr &I, const Value *V,
                             unsigned char OpFlags) const;

  const AArch64TargetMachine &TM;
  const AArch64Subtarget &STI;
  const AArch64InstrInfo &TII;
  const AArch64RegisterInfo &TRI;
  const AArch64RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

// We declare the temporaries used by selectImpl() in the class to minimize the
// cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

AArch64InstructionSelector::AArch64InstructionSelector(
    const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
    const AArch64RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
static const TargetRegisterClass *
getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB,
                         const RegisterBankInfo &RBI,
                         bool GetAllRegSet = false) {
  if (RB.getID() == AArch64::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 32)
      return GetAllRegSet ? &AArch64::GPR32allRegClass
                          : &AArch64::GPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return GetAllRegSet ? &AArch64::GPR64allRegClass
                          : &AArch64::GPR64RegClass;
    return nullptr;
  }

  if (RB.getID() == AArch64::FPRRegBankID) {
    if (Ty.getSizeInBits() <= 16)
      return &AArch64::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &AArch64::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &AArch64::FPR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return &AArch64::FPR128RegClass;
    return nullptr;
  }

  return nullptr;
}

/// Given a register bank, and size in bits, return the smallest register class
/// that can represent that combination.
static const TargetRegisterClass *
getMinClassForRegBank(const RegisterBank &RB, unsigned SizeInBits,
                      bool GetAllRegSet = false) {
  unsigned RegBankID = RB.getID();

  if (RegBankID == AArch64::GPRRegBankID) {
    if (SizeInBits <= 32)
      return GetAllRegSet ? &AArch64::GPR32allRegClass
                          : &AArch64::GPR32RegClass;
    if (SizeInBits == 64)
      return GetAllRegSet ? &AArch64::GPR64allRegClass
                          : &AArch64::GPR64RegClass;
  }

  if (RegBankID == AArch64::FPRRegBankID) {
    switch (SizeInBits) {
    default:
      return nullptr;
    case 8:
      return &AArch64::FPR8RegClass;
    case 16:
      return &AArch64::FPR16RegClass;
    case 32:
      return &AArch64::FPR32RegClass;
    case 64:
      return &AArch64::FPR64RegClass;
    case 128:
      return &AArch64::FPR128RegClass;
    }
  }

  return nullptr;
}
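
// A minimal usage sketch (illustrative, not part of the original source):
//   const TargetRegisterClass *RC = getMinClassForRegBank(GPRBank, 32);
//   // RC == &AArch64::GPR32RegClass; with GetAllRegSet == true this would be
//   // &AArch64::GPR32allRegClass instead, and unsupported (bank, size)
//   // combinations yield nullptr.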

/// Returns the correct subregister to use for a given register class.
static bool getSubRegForClass(const TargetRegisterClass *RC,
                              const TargetRegisterInfo &TRI, unsigned &SubReg) {
  switch (TRI.getRegSizeInBits(*RC)) {
  case 8:
    SubReg = AArch64::bsub;
    break;
  case 16:
    SubReg = AArch64::hsub;
    break;
  case 32:
    if (RC == &AArch64::GPR32RegClass)
      SubReg = AArch64::sub_32;
    else
      SubReg = AArch64::ssub;
    break;
  case 64:
    SubReg = AArch64::dsub;
    break;
  default:
    LLVM_DEBUG(
        dbgs() << "Couldn't find appropriate subregister for register class.");
    return false;
  }

  return true;
}
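
// The resulting mapping (illustrative): bsub/hsub/ssub/dsub for the 8/16/32/64
// bit FPR classes, with the 32-bit GPR case special-cased to sub_32, e.g.:
//   unsigned SubReg = 0;
//   getSubRegForClass(&AArch64::FPR32RegClass, TRI, SubReg); // SubReg == ssub
//   getSubRegForClass(&AArch64::GPR32RegClass, TRI, SubReg); // SubReg == sub_32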

/// Check whether \p I is a currently unsupported binary operation:
/// - it has an unsized type
/// - an operand is not a vreg
/// - its operands are not all in the same bank
/// These are checks that should someday live in the verifier, but right now,
/// these are mostly limitations of the aarch64 selector.
static bool unsupportedBinOp(const MachineInstr &I,
                             const AArch64RegisterBankInfo &RBI,
                             const MachineRegisterInfo &MRI,
                             const AArch64RegisterInfo &TRI) {
  LLT Ty = MRI.getType(I.getOperand(0).getReg());
  if (!Ty.isValid()) {
    LLVM_DEBUG(dbgs() << "Generic binop register should be typed\n");
    return true;
  }

  const RegisterBank *PrevOpBank = nullptr;
  for (auto &MO : I.operands()) {
    // FIXME: Support non-register operands.
    if (!MO.isReg()) {
      LLVM_DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
      return true;
    }

    // FIXME: Can generic operations have physical register operands? If
    // so, this will need to be taught about that, and we'll need to get the
    // bank out of the minimal class for the register.
    // Either way, this needs to be documented (and possibly verified).
    if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      LLVM_DEBUG(dbgs() << "Generic inst has physical register operand\n");
      return true;
    }

    const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
    if (!OpBank) {
      LLVM_DEBUG(dbgs() << "Generic register has no bank or class\n");
      return true;
    }

    if (PrevOpBank && OpBank != PrevOpBank) {
      LLVM_DEBUG(dbgs() << "Generic inst operands have different banks\n");
      return true;
    }
    PrevOpBank = OpBank;
  }
  return false;
}

/// Select the AArch64 opcode for the basic binary operation \p GenericOpc
/// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID
/// and of size \p OpSize.
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
                               unsigned OpSize) {
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    if (OpSize == 32) {
      switch (GenericOpc) {
      case TargetOpcode::G_SHL:
        return AArch64::LSLVWr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVWr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVWr;
      default:
        return GenericOpc;
      }
    } else if (OpSize == 64) {
      switch (GenericOpc) {
      case TargetOpcode::G_GEP:
        return AArch64::ADDXrr;
      case TargetOpcode::G_SHL:
        return AArch64::LSLVXr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVXr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVXr;
      default:
        return GenericOpc;
      }
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDSrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBSrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULSrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVSrr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDDrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBDrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULDrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVDrr;
      case TargetOpcode::G_OR:
        return AArch64::ORRv8i8;
      default:
        return GenericOpc;
      }
    }
    break;
  }
  return GenericOpc;
}
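
// For example (derived from the table above):
//   selectBinaryOp(TargetOpcode::G_SHL, AArch64::GPRRegBankID, 32)
// yields AArch64::LSLVWr, while
//   selectBinaryOp(TargetOpcode::G_FADD, AArch64::FPRRegBankID, 64)
// yields AArch64::FADDDrr. Unsupported combinations return GenericOpc
// unchanged, which the caller treats as selection failure.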

/// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc,
/// appropriate for the (value) register bank \p RegBankID and of memory access
/// size \p OpSize. This returns the variant with the base+unsigned-immediate
/// addressing mode (e.g., LDRXui).
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
                                    unsigned OpSize) {
  const bool isStore = GenericOpc == TargetOpcode::G_STORE;
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBBui : AArch64::LDRBBui;
    case 16:
      return isStore ? AArch64::STRHHui : AArch64::LDRHHui;
    case 32:
      return isStore ? AArch64::STRWui : AArch64::LDRWui;
    case 64:
      return isStore ? AArch64::STRXui : AArch64::LDRXui;
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBui : AArch64::LDRBui;
    case 16:
      return isStore ? AArch64::STRHui : AArch64::LDRHui;
    case 32:
      return isStore ? AArch64::STRSui : AArch64::LDRSui;
    case 64:
      return isStore ? AArch64::STRDui : AArch64::LDRDui;
    }
    break;
  }
  return GenericOpc;
}
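
// A quick sketch of the mapping (illustrative): a 64-bit G_LOAD whose value
// lives on the GPR bank becomes LDRXui, while the same load on the FPR bank
// becomes LDRDui. Note that OpSize here is the memory access size in bits, so
// the caller passes MemOp.getSize() * 8, not the register width.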

/// Helper function that verifies that we have a valid copy at the end of
/// selectCopy. Verifies that the source and dest have the expected sizes and
/// then returns true.
static bool isValidCopy(const MachineInstr &I, const RegisterBank &DstBank,
                        const MachineRegisterInfo &MRI,
                        const TargetRegisterInfo &TRI,
                        const RegisterBankInfo &RBI) {
  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);

  // Make sure the size of the source and dest line up.
  assert(
      (DstSize == SrcSize ||
       // Copies are a means to set up initial types, the number of
       // bits may not exactly match.
       (TargetRegisterInfo::isPhysicalRegister(SrcReg) && DstSize <= SrcSize) ||
       // Copies are a means to copy bits around, as long as we are
       // on the same register class, that's fine. Otherwise, that
       // means we need some SUBREG_TO_REG or AND & co.
       (((DstSize + 31) / 32 == (SrcSize + 31) / 32) && DstSize > SrcSize)) &&
      "Copy with different width?!");

  // Check the size of the destination.
  assert((DstSize <= 64 || DstBank.getID() == AArch64::FPRRegBankID) &&
         "GPRs cannot get more than 64-bit width values");

  return true;
}

/// Helper function for selectCopy. Inserts a subregister copy from
/// \p *From to \p *To, linking it up to \p I.
///
/// e.g., given I = "Dst = COPY SrcReg", we'll transform that into
///
/// CopyReg (From class) = COPY SrcReg
/// SubRegCopy (To class) = COPY CopyReg:SubReg
/// Dst = COPY SubRegCopy
static bool selectSubregisterCopy(MachineInstr &I, const TargetInstrInfo &TII,
                                  MachineRegisterInfo &MRI,
                                  const RegisterBankInfo &RBI, unsigned SrcReg,
                                  const TargetRegisterClass *From,
                                  const TargetRegisterClass *To,
                                  unsigned SubReg) {
  unsigned CopyReg = MRI.createVirtualRegister(From);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::COPY), CopyReg)
      .addUse(SrcReg);
  unsigned SubRegCopy = MRI.createVirtualRegister(To);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
          SubRegCopy)
      .addUse(CopyReg, 0, SubReg);
  MachineOperand &RegOp = I.getOperand(1);
  RegOp.setReg(SubRegCopy);

  // It's possible that the destination register won't be constrained. Make
  // sure that happens.
  if (!TargetRegisterInfo::isPhysicalRegister(I.getOperand(0).getReg()))
    RBI.constrainGenericRegister(I.getOperand(0).getReg(), *To, MRI);

  return true;
}

static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg = I.getOperand(1).getReg();
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
  const TargetRegisterClass *DstRC = getMinClassForRegBank(
      DstRegBank, RBI.getSizeInBits(DstReg, MRI, TRI), true);
  if (!DstRC) {
    LLVM_DEBUG(dbgs() << "Unexpected dest size "
                      << RBI.getSizeInBits(DstReg, MRI, TRI) << '\n');
    return false;
  }

  // A couple of helpers below, for making sure that the copy we produce is
  // valid.

  // Set to true if we insert a SUBREG_TO_REG. If we do this, then we don't want
  // to verify that the src and dst are the same size, since that's handled by
  // the SUBREG_TO_REG.
  bool KnownValid = false;

  // Returns true, or asserts if something we don't expect happens. Instead of
  // returning true, we return isValidCopy() to ensure that we verify the
  // result.
  auto CheckCopy = [&I, &DstRegBank, &MRI, &TRI, &RBI, &KnownValid]() {
    // If we have a bitcast or something, we can't have physical registers.
    assert(
        I.isCopy() ||
        (!TargetRegisterInfo::isPhysicalRegister(I.getOperand(0).getReg()) &&
         !TargetRegisterInfo::isPhysicalRegister(I.getOperand(1).getReg())) &&
            "No phys reg on generic operator!");
    assert(KnownValid || isValidCopy(I, DstRegBank, MRI, TRI, RBI));
    return true;
  };

  // Is this a copy? If so, then we may need to insert a subregister copy, or
  // a SUBREG_TO_REG.
  if (I.isCopy()) {
    // Yes. Check if there's anything to fix up.
    const TargetRegisterClass *SrcRC = getMinClassForRegBank(
        SrcRegBank, RBI.getSizeInBits(SrcReg, MRI, TRI), true);
    if (!SrcRC) {
      LLVM_DEBUG(dbgs() << "Couldn't determine source register class\n");
      return false;
    }

    // Is this a cross-bank copy?
    if (DstRegBank.getID() != SrcRegBank.getID()) {
      // If we're doing a cross-bank copy on different-sized registers, we need
      // to do a bit more work.
      unsigned SrcSize = TRI.getRegSizeInBits(*SrcRC);
      unsigned DstSize = TRI.getRegSizeInBits(*DstRC);

      if (SrcSize > DstSize) {
        // We're doing a cross-bank copy into a smaller register. We need a
        // subregister copy. First, get a register class that's on the same
        // bank as the destination, but the same size as the source.
        const TargetRegisterClass *SubregRC =
            getMinClassForRegBank(DstRegBank, SrcSize, true);
        assert(SubregRC && "Didn't get a register class for subreg?");

        // Get the appropriate subregister for the destination.
        unsigned SubReg = 0;
        if (!getSubRegForClass(DstRC, TRI, SubReg)) {
          LLVM_DEBUG(dbgs() << "Couldn't determine subregister for copy.\n");
          return false;
        }

        // Now, insert a subregister copy using the new register class.
        selectSubregisterCopy(I, TII, MRI, RBI, SrcReg, SubregRC, DstRC,
                              SubReg);
        return CheckCopy();
      } else if (DstRegBank.getID() == AArch64::GPRRegBankID &&
                 DstSize == 32 && SrcSize == 16) {
        // Special case for FPR16 to GPR32.
        // FIXME: This can probably be generalized like the above case.
        unsigned PromoteReg =
            MRI.createVirtualRegister(&AArch64::FPR32RegClass);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(AArch64::SUBREG_TO_REG), PromoteReg)
            .addImm(0)
            .addUse(SrcReg)
            .addImm(AArch64::hsub);
        MachineOperand &RegOp = I.getOperand(1);
        RegOp.setReg(PromoteReg);

        // Promise that the copy is implicitly validated by the SUBREG_TO_REG.
        KnownValid = true;
      }
    }

    // If the destination is a physical register, then there's nothing to
    // change, so we're done.
    if (TargetRegisterInfo::isPhysicalRegister(DstReg))
      return CheckCopy();
  }

  // No need to constrain SrcReg. It will get constrained when we hit another
  // of its uses or defs. Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(AArch64::COPY));
  return CheckCopy();
}

static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy) {
  if (!DstTy.isScalar() || !SrcTy.isScalar())
    return GenericOpc;

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();

  switch (DstSize) {
  case 32:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  case 64:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  default:
    return GenericOpc;
  }
  return GenericOpc;
}

static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return AArch64CC::NE;
  case CmpInst::ICMP_EQ:
    return AArch64CC::EQ;
  case CmpInst::ICMP_SGT:
    return AArch64CC::GT;
  case CmpInst::ICMP_SGE:
    return AArch64CC::GE;
  case CmpInst::ICMP_SLT:
    return AArch64CC::LT;
  case CmpInst::ICMP_SLE:
    return AArch64CC::LE;
  case CmpInst::ICMP_UGT:
    return AArch64CC::HI;
  case CmpInst::ICMP_UGE:
    return AArch64CC::HS;
  case CmpInst::ICMP_ULT:
    return AArch64CC::LO;
  case CmpInst::ICMP_ULE:
    return AArch64CC::LS;
  }
}

static void changeFCMPPredToAArch64CC(CmpInst::Predicate P,
                                      AArch64CC::CondCode &CondCode,
                                      AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (P) {
  default:
    llvm_unreachable("Unknown FP condition!");
  case CmpInst::FCMP_OEQ:
    CondCode = AArch64CC::EQ;
    break;
  case CmpInst::FCMP_OGT:
    CondCode = AArch64CC::GT;
    break;
  case CmpInst::FCMP_OGE:
    CondCode = AArch64CC::GE;
    break;
  case CmpInst::FCMP_OLT:
    CondCode = AArch64CC::MI;
    break;
  case CmpInst::FCMP_OLE:
    CondCode = AArch64CC::LS;
    break;
  case CmpInst::FCMP_ONE:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GT;
    break;
  case CmpInst::FCMP_ORD:
    CondCode = AArch64CC::VC;
    break;
  case CmpInst::FCMP_UNO:
    CondCode = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UEQ:
    CondCode = AArch64CC::EQ;
    CondCode2 = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UGT:
    CondCode = AArch64CC::HI;
    break;
  case CmpInst::FCMP_UGE:
    CondCode = AArch64CC::PL;
    break;
  case CmpInst::FCMP_ULT:
    CondCode = AArch64CC::LT;
    break;
  case CmpInst::FCMP_ULE:
    CondCode = AArch64CC::LE;
    break;
  case CmpInst::FCMP_UNE:
    CondCode = AArch64CC::NE;
    break;
  }
}

bool AArch64InstructionSelector::selectCompareBranch(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
  MachineInstr *CCMI = MRI.getVRegDef(CondReg);
  if (CCMI->getOpcode() == TargetOpcode::G_TRUNC)
    CCMI = MRI.getVRegDef(CCMI->getOperand(1).getReg());
  if (CCMI->getOpcode() != TargetOpcode::G_ICMP)
    return false;

  unsigned LHS = CCMI->getOperand(2).getReg();
  unsigned RHS = CCMI->getOperand(3).getReg();
  if (!getConstantVRegVal(RHS, MRI))
    std::swap(RHS, LHS);

  const auto RHSImm = getConstantVRegVal(RHS, MRI);
  if (!RHSImm || *RHSImm != 0)
    return false;

  const RegisterBank &RB = *RBI.getRegBank(LHS, MRI, TRI);
  if (RB.getID() != AArch64::GPRRegBankID)
    return false;

  const auto Pred = (CmpInst::Predicate)CCMI->getOperand(1).getPredicate();
  if (Pred != CmpInst::ICMP_NE && Pred != CmpInst::ICMP_EQ)
    return false;

  const unsigned CmpWidth = MRI.getType(LHS).getSizeInBits();
  unsigned CBOpc = 0;
  if (CmpWidth <= 32)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZW : AArch64::CBNZW);
  else if (CmpWidth == 64)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZX : AArch64::CBNZX);
  else
    return false;

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CBOpc))
      .addUse(LHS)
      .addMBB(DestMBB)
      .constrainAllUses(TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
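
// An illustrative before/after for the fold above (schematic MIR, not from
// the original source): given
//   %cmp:gpr(s1) = G_ICMP intpred(eq), %x(s64), 0
//   G_BRCOND %cmp(s1), %bb.dest
// the compare-against-zero and the branch collapse into a single flag-free
// branch:
//   CBZX %x, %bb.dest
// (CBNZW/CBNZX for ICMP_NE, and the W variants for widths <= 32 bits).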

bool AArch64InstructionSelector::selectVaStartAAPCS(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  return false;
}

bool AArch64InstructionSelector::selectVaStartDarwin(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  unsigned ListReg = I.getOperand(0).getReg();

  unsigned ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);

  auto MIB =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
          .addDef(ArgsAddrReg)
          .addFrameIndex(FuncInfo->getVarArgsStackIndex())
          .addImm(0)
          .addImm(0);

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);

  MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui))
            .addUse(ArgsAddrReg)
            .addUse(ListReg)
            .addImm(0)
            .addMemOperand(*I.memoperands_begin());

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
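
// Roughly speaking (an illustrative sketch, not authoritative): on Darwin a
// va_list is a single pointer, so va_start only needs to store the address of
// the first variadic stack slot into the list, i.e. something like:
//   %argsAddr = ADDXri %fixed-stack.varargs, 0, 0
//   STRXui %argsAddr, %list, 0
// The AAPCS va_list has multiple fields and is more involved; it is not
// implemented here yet, hence selectVaStartAAPCS returns false.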

void AArch64InstructionSelector::materializeLargeCMVal(
    MachineInstr &I, const Value *V, unsigned char OpFlags) const {
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineIRBuilder MIB(I);

  auto MovZ = MIB.buildInstr(AArch64::MOVZXi, {&AArch64::GPR64RegClass}, {});
  MovZ->addOperand(MF, I.getOperand(1));
  MovZ->getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_G0 |
                                     AArch64II::MO_NC);
  MovZ->addOperand(MF, MachineOperand::CreateImm(0));
  constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);

  auto BuildMovK = [&](unsigned SrcReg, unsigned char Flags, unsigned Offset,
                       unsigned ForceDstReg) {
    unsigned DstReg = ForceDstReg
                          ? ForceDstReg
                          : MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    auto MovI = MIB.buildInstr(AArch64::MOVKXi).addDef(DstReg).addUse(SrcReg);
    if (auto *GV = dyn_cast<GlobalValue>(V)) {
      MovI->addOperand(MF, MachineOperand::CreateGA(
                               GV, MovZ->getOperand(1).getOffset(), Flags));
    } else {
      MovI->addOperand(
          MF, MachineOperand::CreateBA(cast<BlockAddress>(V),
                                       MovZ->getOperand(1).getOffset(), Flags));
    }
    MovI->addOperand(MF, MachineOperand::CreateImm(Offset));
    constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI);
    return DstReg;
  };
  unsigned DstReg = BuildMovK(MovZ->getOperand(0).getReg(),
                              AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0);
  DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0);
  BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg());
  return;
}
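
// The emitted sequence looks roughly like this (illustrative MIR for a global
// @g under the large code model; flag syntax is schematic):
//   %t0:gpr64 = MOVZXi @g [g0, nc], 0
//   %t1:gpr64 = MOVKXi %t0, @g [g1, nc], 16
//   %t2:gpr64 = MOVKXi %t1, @g [g2, nc], 32
//   %dst:gpr64 = MOVKXi %t2, @g [g3], 48
// i.e. each instruction deposits one 16-bit slice of the 64-bit address.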

bool AArch64InstructionSelector::select(MachineInstr &I,
                                        CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  // G_PHI requires the same handling as PHI.
  if (!isPreISelGenericOpcode(Opcode) || Opcode == TargetOpcode::G_PHI) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);

    if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) {
      const unsigned DefReg = I.getOperand(0).getReg();
      const LLT DefTy = MRI.getType(DefReg);

      const TargetRegisterClass *DefRC = nullptr;
      if (TargetRegisterInfo::isPhysicalRegister(DefReg)) {
        DefRC = TRI.getRegClass(DefReg);
      } else {
        const RegClassOrRegBank &RegClassOrBank =
            MRI.getRegClassOrRegBank(DefReg);

        DefRC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
        if (!DefRC) {
          if (!DefTy.isValid()) {
            LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
            return false;
          }
          const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
          DefRC = getRegClassForTypeOnBank(DefTy, RB, RBI);
          if (!DefRC) {
            LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
            return false;
          }
        }
      }
      I.setDesc(TII.get(TargetOpcode::PHI));

      return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
    }

    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(
        dbgs() << "Generic instruction has unexpected implicit operands\n");
    return false;
  }

  if (selectImpl(I, CoverageInfo))
    return true;

  LLT Ty =
      I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{};

  switch (Opcode) {
  case TargetOpcode::G_BRCOND: {
    if (Ty.getSizeInBits() > 32) {
      // We shouldn't need this on AArch64, but it would be implemented as an
      // EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the
      // bit being tested is < 32.
      LLVM_DEBUG(dbgs() << "G_BRCOND has type: " << Ty
                        << ", expected at most 32-bits");
      return false;
    }

    const unsigned CondReg = I.getOperand(0).getReg();
    MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

    // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z
    // instructions will not be produced, as they are conditional branch
    // instructions that do not set flags.
    bool ProduceNonFlagSettingCondBr =
        !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);
    if (ProduceNonFlagSettingCondBr && selectCompareBranch(I, MF, MRI))
      return true;

    if (ProduceNonFlagSettingCondBr) {
      auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
                     .addUse(CondReg)
                     .addImm(/*bit offset=*/0)
                     .addMBB(DestMBB);

      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
    } else {
      // The branch must be taken when the condition bit is set, so test the
      // low bit with a flag-setting AND and branch on NE.
      auto CMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
                     .addDef(AArch64::WZR)
                     .addUse(CondReg)
                     .addImm(1);
      constrainSelectedInstRegOperands(*CMP.getInstr(), TII, TRI, RBI);
      auto Bcc =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::Bcc))
              .addImm(AArch64CC::NE)
              .addMBB(DestMBB);

      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*Bcc.getInstr(), TII, TRI, RBI);
    }
  }
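
  // Illustrative comparison of the two selections above (schematic MIR):
  //   G_BRCOND %c(s1), %bb.dest
  // normally becomes the flag-free
  //   TBNZW %c, 0, %bb.dest
  // but when the function carries the SpeculativeLoadHardening attribute it is
  // instead selected as a flag-setting test plus conditional branch:
  //   ANDSWri $wzr, %c, 1   (sets NZCV from %c & 1)
  //   Bcc ne, %bb.dest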

  case TargetOpcode::G_BRINDIRECT: {
    I.setDesc(TII.get(AArch64::BR));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_CONSTANT: {
    const bool isFP = Opcode == TargetOpcode::G_FCONSTANT;

    const LLT s32 = LLT::scalar(32);
    const LLT s64 = LLT::scalar(64);
    const LLT p0 = LLT::pointer(0, 64);

    const unsigned DefReg = I.getOperand(0).getReg();
    const LLT DefTy = MRI.getType(DefReg);
    const unsigned DefSize = DefTy.getSizeInBits();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    // FIXME: Redundant check, but even less readable when factored out.
    if (isFP) {
      if (Ty != s32 && Ty != s64) {
        LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
                          << " constant, expected: " << s32 << " or " << s64
                          << '\n');
        return false;
      }

      if (RB.getID() != AArch64::FPRRegBankID) {
        LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
                          << " constant on bank: " << RB
                          << ", expected: FPR\n");
        return false;
      }

      // The case when we have 0.0 is covered by tablegen. Reject it here so we
      // can be sure tablegen works correctly and isn't rescued by this code.
      if (I.getOperand(1).getFPImm()->getValueAPF().isExactlyValue(0.0))
        return false;
    } else {
      // s32 and s64 are covered by tablegen.
      if (Ty != p0) {
        LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
                          << " constant, expected: " << s32 << ", " << s64
                          << ", or " << p0 << '\n');
        return false;
      }

      if (RB.getID() != AArch64::GPRRegBankID) {
        LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
                          << " constant on bank: " << RB
                          << ", expected: GPR\n");
        return false;
      }
    }

    const unsigned MovOpc =
        DefSize == 32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;

    I.setDesc(TII.get(MovOpc));

    if (isFP) {
      const TargetRegisterClass &GPRRC =
          DefSize == 32 ? AArch64::GPR32RegClass : AArch64::GPR64RegClass;
      const TargetRegisterClass &FPRRC =
          DefSize == 32 ? AArch64::FPR32RegClass : AArch64::FPR64RegClass;

      const unsigned DefGPRReg = MRI.createVirtualRegister(&GPRRC);
      MachineOperand &RegOp = I.getOperand(0);
      RegOp.setReg(DefGPRReg);

      BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
              TII.get(AArch64::COPY))
          .addDef(DefReg)
          .addUse(DefGPRReg);

      if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
        LLVM_DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
        return false;
      }

      MachineOperand &ImmOp = I.getOperand(1);
      // FIXME: Is going through int64_t always correct?
      ImmOp.ChangeToImmediate(
          ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
    } else if (I.getOperand(1).isCImm()) {
      uint64_t Val = I.getOperand(1).getCImm()->getZExtValue();
      I.getOperand(1).ChangeToImmediate(Val);
    } else if (I.getOperand(1).isImm()) {
      uint64_t Val = I.getOperand(1).getImm();
      I.getOperand(1).ChangeToImmediate(Val);
    }

    constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    return true;
  }
  case TargetOpcode::G_EXTRACT: {
    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    (void)DstTy;
    unsigned SrcSize = SrcTy.getSizeInBits();
    // Larger extracts are vectors, same-size extracts should be something else
    // by now (either split up or simplified to a COPY).
    if (SrcTy.getSizeInBits() > 64 || Ty.getSizeInBits() > 32)
      return false;

    I.setDesc(TII.get(SrcSize == 64 ? AArch64::UBFMXri : AArch64::UBFMWri));
    MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() +
                                      Ty.getSizeInBits() - 1);

    if (SrcSize < 64) {
      assert(SrcSize == 32 && DstTy.getSizeInBits() == 16 &&
             "unexpected G_EXTRACT types");
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
            TII.get(AArch64::COPY))
        .addDef(I.getOperand(0).getReg())
        .addUse(DstReg, 0, AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(0).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(0).setReg(DstReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
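
  // For example (an illustrative sketch): with a 64-bit source,
  //   %d(s32) = G_EXTRACT %s(s64), 16
  // is rewritten into an unsigned bitfield move extracting bits [47:16],
  //   %tmp(s64) = UBFMXri %s, 16, 47
  //   %d = COPY %tmp.sub_32
  // where the immediate appended above is lsb + width - 1.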

  case TargetOpcode::G_INSERT: {
    LLT SrcTy = MRI.getType(I.getOperand(2).getReg());
    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    unsigned DstSize = DstTy.getSizeInBits();
    // Larger inserts are vectors, same-size ones should be something else by
    // now (split up or turned into COPYs).
    if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32)
      return false;

    I.setDesc(TII.get(DstSize == 64 ? AArch64::BFMXri : AArch64::BFMWri));
    unsigned LSB = I.getOperand(3).getImm();
    unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    I.getOperand(3).setImm((DstSize - LSB) % DstSize);
    MachineInstrBuilder(MF, I).addImm(Width - 1);

    if (DstSize < 64) {
      assert(DstSize == 32 && SrcTy.getSizeInBits() == 16 &&
             "unexpected G_INSERT types");
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    unsigned SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
            TII.get(AArch64::SUBREG_TO_REG))
        .addDef(SrcReg)
        .addImm(0)
        .addUse(I.getOperand(2).getReg())
        .addImm(AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(2).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(2).setReg(SrcReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_FRAME_INDEX: {
    // allocas and G_FRAME_INDEX are only supported in addrspace(0).
    if (Ty != LLT::pointer(0, 64)) {
      LLVM_DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
                        << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }
    I.setDesc(TII.get(AArch64::ADDXri));

    // MOs for a #0 shifted immediate.
    I.addOperand(MachineOperand::CreateImm(0));
    I.addOperand(MachineOperand::CreateImm(0));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_GLOBAL_VALUE: {
    auto GV = I.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // FIXME: we don't support TLS yet.
      return false;
    }
    unsigned char OpFlags = STI.ClassifyGlobalReference(GV, TM);
    if (OpFlags & AArch64II::MO_GOT) {
      I.setDesc(TII.get(AArch64::LOADgot));
      I.getOperand(1).setTargetFlags(OpFlags);
    } else if (TM.getCodeModel() == CodeModel::Large) {
      // Materialize the global using movz/movk instructions.
      materializeLargeCMVal(I, GV, OpFlags);
      I.eraseFromParent();
      return true;
    } else if (TM.getCodeModel() == CodeModel::Tiny) {
      I.setDesc(TII.get(AArch64::ADR));
      I.getOperand(1).setTargetFlags(OpFlags);
    } else {
      I.setDesc(TII.get(AArch64::MOVaddr));
      I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
      MachineInstrBuilder MIB(MF, I);
      MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(),
                           OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    LLT PtrTy = MRI.getType(I.getOperand(1).getReg());

    if (PtrTy != LLT::pointer(0, 64)) {
      LLVM_DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
                        << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }

    auto &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }
    unsigned MemSizeInBits = MemOp.getSize() * 8;

    const unsigned PtrReg = I.getOperand(1).getReg();
#ifndef NDEBUG
    const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
    // Sanity-check the pointer register.
    assert(PtrRB.getID() == AArch64::GPRRegBankID &&
           "Load/Store pointer operand isn't a GPR");
    assert(MRI.getType(PtrReg).isPointer() &&
           "Load/Store pointer operand isn't a pointer");
#endif

    const unsigned ValReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);

    const unsigned NewOpc =
        selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemSizeInBits);
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));

    uint64_t Offset = 0;
    auto *PtrMI = MRI.getVRegDef(PtrReg);

    // Try to fold a GEP into our unsigned immediate addressing mode.
    if (PtrMI->getOpcode() == TargetOpcode::G_GEP) {
      if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) {
        int64_t Imm = *COff;
        const unsigned Size = MemSizeInBits / 8;
        const unsigned Scale = Log2_32(Size);
        if ((Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)) {
          unsigned Ptr2Reg = PtrMI->getOperand(1).getReg();
          I.getOperand(1).setReg(Ptr2Reg);
          PtrMI = MRI.getVRegDef(Ptr2Reg);
          Offset = Imm / Size;
        }
      }
    }
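
    // An illustrative example of the fold above: for
    //   %p(p0) = G_GEP %base(p0), 16
    //   %v(s64) = G_LOAD %p
    // the constant offset is a multiple of the 8-byte access size and fits the
    // scaled unsigned 12-bit immediate, so this selects to
    //   %v = LDRXui %base, 2
    // (the immediate is the byte offset divided by the access size).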
1219
Ahmed Bougachaf75782f2017-03-27 17:31:56 +00001220 // If we haven't folded anything into our addressing mode yet, try to fold
1221 // a frame index into the base+offset.
1222 if (!Offset && PtrMI->getOpcode() == TargetOpcode::G_FRAME_INDEX)
1223 I.getOperand(1).ChangeToFrameIndex(PtrMI->getOperand(1).getIndex());
1224
Ahmed Bougacha8a654082017-03-27 17:31:52 +00001225 I.addOperand(MachineOperand::CreateImm(Offset));
Ahmed Bougacha85a66a62017-03-27 17:31:48 +00001226
1227 // If we're storing a 0, use WZR/XZR.
1228 if (auto CVal = getConstantVRegVal(ValReg, MRI)) {
1229 if (*CVal == 0 && Opcode == TargetOpcode::G_STORE) {
1230 if (I.getOpcode() == AArch64::STRWui)
1231 I.getOperand(0).setReg(AArch64::WZR);
1232 else if (I.getOpcode() == AArch64::STRXui)
1233 I.getOperand(0).setReg(AArch64::XZR);
1234 }
1235 }
1236
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001237 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1238 }
1239
  case TargetOpcode::G_SMULH:
  case TargetOpcode::G_UMULH: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    if (RB.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
      return false;
    }

    if (Ty != LLT::scalar(64)) {
      LLVM_DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
                        << ", expected: " << LLT::scalar(64) << '\n');
      return false;
    }

    unsigned NewOpc = I.getOpcode() == TargetOpcode::G_SMULH ? AArch64::SMULHrr
                                                             : AArch64::UMULHrr;
    I.setDesc(TII.get(NewOpc));

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:

  case TargetOpcode::G_OR:
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_GEP: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned OpSize = Ty.getSizeInBits();

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));
    // FIXME: Should the type be always reset in setDesc?

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

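  // G_PTR_MASK aligns a pointer down by clearing its low Align bits. Given
  // the guard below, the all-ones-above mask is always a run of set bits and
  // therefore encodable as a logical immediate, so this becomes one ANDXri.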
  case TargetOpcode::G_PTR_MASK: {
    uint64_t Align = I.getOperand(2).getImm();
    if (Align >= 64 || Align == 0)
      return false;

    uint64_t Mask = ~((1ULL << Align) - 1);
    I.setDesc(TII.get(AArch64::ANDXri));
    I.getOperand(2).setImm(AArch64_AM::encodeLogicalImmediate(Mask, 64));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC: {
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());

    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

    if (DstRB.getID() != SrcRB.getID()) {
      LLVM_DEBUG(
          dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
      return false;
    }

    if (DstRB.getID() == AArch64::GPRRegBankID) {
      const TargetRegisterClass *DstRC =
          getRegClassForTypeOnBank(DstTy, DstRB, RBI);
      if (!DstRC)
        return false;

      const TargetRegisterClass *SrcRC =
          getRegClassForTypeOnBank(SrcTy, SrcRB, RBI);
      if (!SrcRC)
        return false;

      if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
          !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
        LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
        return false;
      }

      if (DstRC == SrcRC) {
        // Nothing to be done
      } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) &&
                 SrcTy == LLT::scalar(64)) {
        llvm_unreachable("TableGen can import this case");
        return false;
      } else if (DstRC == &AArch64::GPR32RegClass &&
                 SrcRC == &AArch64::GPR64RegClass) {
        I.getOperand(1).setSubReg(AArch64::sub_32);
      } else {
        LLVM_DEBUG(
            dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
        return false;
      }

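      // All remaining GPR cases reduce to a plain COPY; a 64-to-32-bit
      // truncation is expressed by reading the sub_32 subregister set above.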
      I.setDesc(TII.get(TargetOpcode::COPY));
      return true;
    } else if (DstRB.getID() == AArch64::FPRRegBankID) {
      if (DstTy == LLT::vector(4, 16) && SrcTy == LLT::vector(4, 32)) {
        I.setDesc(TII.get(AArch64::XTNv4i16));
        constrainSelectedInstRegOperands(I, TII, TRI, RBI);
        return true;
      }
    }

    return false;
  }

  case TargetOpcode::G_ANYEXT: {
    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
    if (RBDst.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst
                        << ", expected: GPR\n");
      return false;
    }

    const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
    if (RBSrc.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc
                        << ", expected: GPR\n");
      return false;
    }

    const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();

    if (DstSize == 0) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
      return false;
    }

    if (DstSize != 64 && DstSize > 32) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
                        << ", expected: 32 or 64\n");
      return false;
    }
    // At this point G_ANYEXT is just like a plain COPY, but we need
    // to explicitly form the 64-bit value if any.
    if (DstSize > 32) {
      unsigned ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
      BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
          .addDef(ExtSrc)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::sub_32);
      I.getOperand(1).setReg(ExtSrc);
    }
    return selectCopy(I, TII, MRI, TRI, RBI);
  }

  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    unsigned Opcode = I.getOpcode();
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
              SrcTy = MRI.getType(I.getOperand(1).getReg());
    const bool isSigned = Opcode == TargetOpcode::G_SEXT;
    const unsigned DefReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    if (RB.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
                        << ", expected: GPR\n");
      return false;
    }

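    // Both extensions lower to a bitfield move: SBFM/UBFM with immr = 0 and
    // imms = <source width> - 1 copies the low SrcTy bits and sign- or
    // zero-fills the rest. A 64-bit destination first needs the 32-bit source
    // placed in the low half of an X register via SUBREG_TO_REG.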
    MachineInstr *ExtI;
    if (DstTy == LLT::scalar(64)) {
      // FIXME: Can we avoid manually doing this?
      if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
        LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
                          << " operand\n");
        return false;
      }

      const unsigned SrcXReg =
          MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
          .addDef(SrcXReg)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::sub_32);

      const unsigned NewOpc = isSigned ? AArch64::SBFMXri : AArch64::UBFMXri;
      ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
                 .addDef(DefReg)
                 .addUse(SrcXReg)
                 .addImm(0)
                 .addImm(SrcTy.getSizeInBits() - 1);
    } else if (DstTy.isScalar() && DstTy.getSizeInBits() <= 32) {
      const unsigned NewOpc = isSigned ? AArch64::SBFMWri : AArch64::UBFMWri;
      ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
                 .addDef(DefReg)
                 .addUse(SrcReg)
                 .addImm(0)
                 .addImm(SrcTy.getSizeInBits() - 1);
    } else {
      return false;
    }

    constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI: {
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
              SrcTy = MRI.getType(I.getOperand(1).getReg());
    const unsigned NewOpc = selectFPConvOpc(Opcode, DstTy, SrcTy);
    if (NewOpc == Opcode)
      return false;

    I.setDesc(TII.get(NewOpc));
    constrainSelectedInstRegOperands(I, TII, TRI, RBI);

    return true;
  }

  case TargetOpcode::G_INTTOPTR:
    // The importer is currently unable to import pointer types since they
    // didn't exist in SelectionDAG.
    return selectCopy(I, TII, MRI, TRI, RBI);

  case TargetOpcode::G_BITCAST:
    // Imported SelectionDAG rules can handle every bitcast except those that
    // bitcast from a type to the same type. Ideally, these shouldn't occur
    // but we might not run an optimizer that deletes them.
    if (MRI.getType(I.getOperand(0).getReg()) ==
        MRI.getType(I.getOperand(1).getReg()))
      return selectCopy(I, TII, MRI, TRI, RBI);
    return false;

  case TargetOpcode::G_SELECT: {
    if (MRI.getType(I.getOperand(1).getReg()) != LLT::scalar(1)) {
      LLVM_DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
                        << ", expected: " << LLT::scalar(1) << '\n');
      return false;
    }

    const unsigned CondReg = I.getOperand(1).getReg();
    const unsigned TReg = I.getOperand(2).getReg();
    const unsigned FReg = I.getOperand(3).getReg();

    unsigned CSelOpc = 0;

    if (Ty == LLT::scalar(32)) {
      CSelOpc = AArch64::CSELWr;
    } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
      CSelOpc = AArch64::CSELXr;
    } else {
      return false;
    }

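    // Test the low bit of the condition with ANDS against immediate 1. The
    // integer result is discarded into WZR; only NZCV matters, and the CSEL
    // below then picks the true or false value on NE.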
    MachineInstr &TstMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
             .addDef(AArch64::WZR)
             .addUse(CondReg)
             .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));

    MachineInstr &CSelMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CSelOpc))
                               .addDef(I.getOperand(0).getReg())
                               .addUse(TReg)
                               .addUse(FReg)
                               .addImm(AArch64CC::NE);

    constrainSelectedInstRegOperands(TstMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSelMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_ICMP: {
    if (Ty != LLT::scalar(32)) {
      LLVM_DEBUG(dbgs() << "G_ICMP result has type: " << Ty
                        << ", expected: " << LLT::scalar(32) << '\n');
      return false;
    }

    unsigned CmpOpc = 0;
    unsigned ZReg = 0;

    LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    if (CmpTy == LLT::scalar(32)) {
      CmpOpc = AArch64::SUBSWrr;
      ZReg = AArch64::WZR;
    } else if (CmpTy == LLT::scalar(64) || CmpTy.isPointer()) {
      CmpOpc = AArch64::SUBSXrr;
      ZReg = AArch64::XZR;
    } else {
      return false;
    }

    // CSINC increments the result by one when the condition code is false.
    // Therefore, we have to invert the predicate to get an increment by 1 when
    // the predicate is true.
    const AArch64CC::CondCode invCC =
        changeICMPPredToAArch64CC(CmpInst::getInversePredicate(
            (CmpInst::Predicate)I.getOperand(1).getPredicate()));

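    // Compare with a flag-setting subtract whose result is discarded into the
    // zero register; the CSINC below then materializes 0 or 1 from NZCV.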
    MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
                               .addDef(ZReg)
                               .addUse(I.getOperand(2).getReg())
                               .addUse(I.getOperand(3).getReg());

    MachineInstr &CSetMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
             .addDef(I.getOperand(0).getReg())
             .addUse(AArch64::WZR)
             .addUse(AArch64::WZR)
             .addImm(invCC);

    constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_FCMP: {
    if (Ty != LLT::scalar(32)) {
      LLVM_DEBUG(dbgs() << "G_FCMP result has type: " << Ty
                        << ", expected: " << LLT::scalar(32) << '\n');
      return false;
    }

    unsigned CmpOpc = 0;
    LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    if (CmpTy == LLT::scalar(32)) {
      CmpOpc = AArch64::FCMPSrr;
    } else if (CmpTy == LLT::scalar(64)) {
      CmpOpc = AArch64::FCMPDrr;
    } else {
      return false;
    }

    // FIXME: regbank

    AArch64CC::CondCode CC1, CC2;
    changeFCMPPredToAArch64CC(
        (CmpInst::Predicate)I.getOperand(1).getPredicate(), CC1, CC2);

    MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
                               .addUse(I.getOperand(2).getReg())
                               .addUse(I.getOperand(3).getReg());

    const unsigned DefReg = I.getOperand(0).getReg();
    unsigned Def1Reg = DefReg;
    if (CC2 != AArch64CC::AL)
      Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);

    MachineInstr &CSetMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
             .addDef(Def1Reg)
             .addUse(AArch64::WZR)
             .addUse(AArch64::WZR)
             .addImm(getInvertedCondCode(CC1));

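    // Some FP predicates (e.g. ONE, UEQ) need two AArch64 condition codes;
    // materialize each with its own CSINC and OR the results together.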
    if (CC2 != AArch64CC::AL) {
      unsigned Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
      MachineInstr &CSet2MI =
          *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
               .addDef(Def2Reg)
               .addUse(AArch64::WZR)
               .addUse(AArch64::WZR)
               .addImm(getInvertedCondCode(CC2));
      MachineInstr &OrMI =
          *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ORRWrr))
               .addDef(DefReg)
               .addUse(Def1Reg)
               .addUse(Def2Reg);
      constrainSelectedInstRegOperands(OrMI, TII, TRI, RBI);
      constrainSelectedInstRegOperands(CSet2MI, TII, TRI, RBI);
    }

    constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_VASTART:
    return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI)
                                : selectVaStartAAPCS(I, MF, MRI);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    if (!I.getOperand(0).isIntrinsicID())
      return false;
    if (I.getOperand(0).getIntrinsicID() != Intrinsic::trap)
      return false;
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::BRK))
        .addImm(1);
    I.eraseFromParent();
    return true;
  case TargetOpcode::G_IMPLICIT_DEF: {
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    const unsigned DstReg = I.getOperand(0).getReg();
    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const TargetRegisterClass *DstRC =
        getRegClassForTypeOnBank(DstTy, DstRB, RBI);
    RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
    return true;
  }
  case TargetOpcode::G_BLOCK_ADDR: {
    if (TM.getCodeModel() == CodeModel::Large) {
      materializeLargeCMVal(I, I.getOperand(1).getBlockAddress(), 0);
      I.eraseFromParent();
      return true;
    } else {
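      // For the smaller code models, MOVaddrBA later expands to an ADRP of
      // the page (MO_PAGE) plus an ADD of the low 12 bits (MO_PAGEOFF|MO_NC).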
      I.setDesc(TII.get(AArch64::MOVaddrBA));
      auto MovMI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::MOVaddrBA),
                           I.getOperand(0).getReg())
                       .addBlockAddress(I.getOperand(1).getBlockAddress(),
                                        /* Offset */ 0, AArch64II::MO_PAGE)
                       .addBlockAddress(
                           I.getOperand(1).getBlockAddress(), /* Offset */ 0,
                           AArch64II::MO_NC | AArch64II::MO_PAGEOFF);
      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*MovMI, TII, TRI, RBI);
    }
  }
  case TargetOpcode::G_BUILD_VECTOR:
    return selectBuildVector(I, MRI);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(I, MRI);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI);
  }

  return false;
}

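/// Emit an IMPLICIT_DEF + INSERT_SUBREG sequence that places \p Scalar into
/// lane 0 of an otherwise-undef vector of type \p DstTy, returning the
/// resulting register in \p Dst.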
bool AArch64InstructionSelector::emitScalarToVector(
    unsigned &Dst, const LLT DstTy, const TargetRegisterClass *DstRC,
    unsigned Scalar, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, MachineRegisterInfo &MRI) const {
  Dst = MRI.createVirtualRegister(DstRC);

  unsigned UndefVec = MRI.createVirtualRegister(DstRC);
  MachineInstr &UndefMI = *BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
                                   TII.get(TargetOpcode::IMPLICIT_DEF))
                               .addDef(UndefVec);

  auto BuildFn = [&](unsigned SubregIndex) {
    MachineInstr &InsMI = *BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
                                   TII.get(TargetOpcode::INSERT_SUBREG))
                              .addDef(Dst)
                              .addUse(UndefVec)
                              .addUse(Scalar)
                              .addImm(SubregIndex);
    constrainSelectedInstRegOperands(UndefMI, TII, TRI, RBI);
    return constrainSelectedInstRegOperands(InsMI, TII, TRI, RBI);
  };

  switch (DstTy.getElementType().getSizeInBits()) {
  case 16:
    return BuildFn(AArch64::hsub);
  case 32:
    return BuildFn(AArch64::ssub);
  case 64:
    return BuildFn(AArch64::dsub);
  default:
    return false;
  }
}

bool AArch64InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert(I.getOpcode() == TargetOpcode::G_MERGE_VALUES && "unexpected opcode");
  const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
  const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
  assert(!DstTy.isVector() && !SrcTy.isVector() && "invalid merge operation");

  // At the moment we only support merging two s32s into an s64.
  if (I.getNumOperands() != 3)
    return false;
  if (DstTy.getSizeInBits() != 64 || SrcTy.getSizeInBits() != 32)
    return false;
  const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI);
  if (RB.getID() != AArch64::GPRRegBankID)
    return false;

  auto *DstRC = &AArch64::GPR64RegClass;
  unsigned SubToRegDef = MRI.createVirtualRegister(DstRC);
  MachineInstr &SubRegMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::SUBREG_TO_REG))
                                .addDef(SubToRegDef)
                                .addImm(0)
                                .addUse(I.getOperand(1).getReg())
                                .addImm(AArch64::sub_32);
  unsigned SubToRegDef2 = MRI.createVirtualRegister(DstRC);
  // Need to anyext the second scalar before we can use bfm
  MachineInstr &SubRegMI2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                     TII.get(TargetOpcode::SUBREG_TO_REG))
                                 .addDef(SubToRegDef2)
                                 .addImm(0)
                                 .addUse(I.getOperand(2).getReg())
                                 .addImm(AArch64::sub_32);
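  // BFMXri with immr = 32, imms = 31 inserts the low 32 bits of the second
  // source into bits [63:32] of the first, producing the merged s64.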
  MachineInstr &BFM =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::BFMXri))
           .addDef(I.getOperand(0).getReg())
           .addUse(SubToRegDef)
           .addUse(SubToRegDef2)
           .addImm(32)
           .addImm(31);
  constrainSelectedInstRegOperands(SubRegMI, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SubRegMI2, TII, TRI, RBI);
  constrainSelectedInstRegOperands(BFM, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::selectUnmergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert(I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "unexpected opcode");

  // TODO: Handle unmerging into GPRs and from scalars to scalars.
  if (RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI)->getID() !=
          AArch64::FPRRegBankID ||
      RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI)->getID() !=
          AArch64::FPRRegBankID) {
    LLVM_DEBUG(dbgs() << "Unmerging vector-to-gpr and scalar-to-scalar "
                         "currently unsupported.\n");
    return false;
  }

  // The last operand is the vector source register, and every other operand is
  // a register to unpack into.
  unsigned NumElts = I.getNumOperands() - 1;
  unsigned SrcReg = I.getOperand(NumElts).getReg();
  const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
  const LLT WideTy = MRI.getType(SrcReg);
  assert(WideTy.isVector() && "can only unmerge from vector types!");
  assert(WideTy.getSizeInBits() > NarrowTy.getSizeInBits() &&
         "source register size too small!");

  // TODO: Handle unmerging into scalars.
  if (!NarrowTy.isScalar()) {
    LLVM_DEBUG(dbgs() << "Vector-to-vector unmerges not supported yet.\n");
    return false;
  }

  // Choose a lane copy opcode and subregister based off of the size of the
  // vector's elements.
  unsigned CopyOpc = 0;
  unsigned ExtractSubReg = 0;
  switch (NarrowTy.getSizeInBits()) {
  case 16:
    CopyOpc = AArch64::CPYi16;
    ExtractSubReg = AArch64::hsub;
    break;
  case 32:
    CopyOpc = AArch64::CPYi32;
    ExtractSubReg = AArch64::ssub;
    break;
  case 64:
    CopyOpc = AArch64::CPYi64;
    ExtractSubReg = AArch64::dsub;
    break;
  default:
    // Unknown size, bail out.
    LLVM_DEBUG(dbgs() << "NarrowTy had unsupported size.\n");
    return false;
  }

  // Set up for the lane copies.
  MachineBasicBlock &MBB = *I.getParent();

  // Stores the registers we'll be copying from.
  SmallVector<unsigned, 4> InsertRegs;

  // We'll use the first register twice, so we only need NumElts-1 registers.
  unsigned NumInsertRegs = NumElts - 1;

  // If our elements fit into exactly 128 bits, then we can copy from the
  // source directly. Otherwise, we need to do a bit of setup with some
  // subregister inserts.
  if (NarrowTy.getSizeInBits() * NumElts == 128) {
    InsertRegs = SmallVector<unsigned, 4>(NumInsertRegs, SrcReg);
  } else {
    // No. We have to perform subregister inserts. For each insert, create an
    // implicit def and a subregister insert, and save the register we create.
    for (unsigned Idx = 0; Idx < NumInsertRegs; ++Idx) {
      unsigned ImpDefReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
      MachineInstr &ImpDefMI =
          *BuildMI(MBB, I, I.getDebugLoc(), TII.get(TargetOpcode::IMPLICIT_DEF),
                   ImpDefReg);

      // Now, create the subregister insert from SrcReg.
      unsigned InsertReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
      MachineInstr &InsMI =
          *BuildMI(MBB, I, I.getDebugLoc(),
                   TII.get(TargetOpcode::INSERT_SUBREG), InsertReg)
               .addUse(ImpDefReg)
               .addUse(SrcReg)
               .addImm(AArch64::dsub);

      constrainSelectedInstRegOperands(ImpDefMI, TII, TRI, RBI);
      constrainSelectedInstRegOperands(InsMI, TII, TRI, RBI);

      // Save the register so that we can copy from it after.
      InsertRegs.push_back(InsertReg);
    }
  }

  // Now that we've created any necessary subregister inserts, we can
  // create the copies.
  //
  // Perform the first copy separately as a subregister copy.
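  // Lane 0 is just the low bits of the source vector, so a subregister copy
  // through hsub/ssub/dsub is enough; CPYi lane copies handle the rest.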
  unsigned CopyTo = I.getOperand(0).getReg();
  MachineInstr &FirstCopy =
      *BuildMI(MBB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), CopyTo)
           .addUse(InsertRegs[0], 0, ExtractSubReg);
  constrainSelectedInstRegOperands(FirstCopy, TII, TRI, RBI);

  // Now, perform the remaining copies as vector lane copies.
  unsigned LaneIdx = 1;
  for (unsigned InsReg : InsertRegs) {
    unsigned CopyTo = I.getOperand(LaneIdx).getReg();
    MachineInstr &CopyInst =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CopyOpc), CopyTo)
             .addUse(InsReg)
             .addImm(LaneIdx);
    constrainSelectedInstRegOperands(CopyInst, TII, TRI, RBI);
    ++LaneIdx;
  }

  // Separately constrain the first copy's destination. Because of the
  // limitation in constrainOperandRegClass, we can't guarantee that this will
  // actually be constrained. So, do it ourselves using the second operand.
  const TargetRegisterClass *RC =
      MRI.getRegClassOrNull(I.getOperand(1).getReg());
  if (!RC) {
    LLVM_DEBUG(dbgs() << "Couldn't constrain copy destination.\n");
    return false;
  }

  RBI.constrainGenericRegister(CopyTo, *RC, MRI);
  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::selectBuildVector(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert(I.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
  // Until we port more of the optimized selections, for now just use a vector
  // insert sequence.
  const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
  const LLT EltTy = MRI.getType(I.getOperand(1).getReg());
  unsigned EltSize = EltTy.getSizeInBits();
  if (EltSize < 16 || EltSize > 64)
    return false; // Don't support all element types yet.
  const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI);
  unsigned Opc;
  unsigned SubregIdx;
  if (RB.getID() == AArch64::GPRRegBankID) {
    if (EltSize == 32) {
      Opc = AArch64::INSvi32gpr;
      SubregIdx = AArch64::ssub;
    } else {
      Opc = AArch64::INSvi64gpr;
      SubregIdx = AArch64::dsub;
    }
  } else {
    if (EltSize == 16) {
      Opc = AArch64::INSvi16lane;
      SubregIdx = AArch64::hsub;
    } else if (EltSize == 32) {
      Opc = AArch64::INSvi32lane;
      SubregIdx = AArch64::ssub;
    } else {
      Opc = AArch64::INSvi64lane;
      SubregIdx = AArch64::dsub;
    }
  }

  unsigned DstVec = 0;

  const TargetRegisterClass *DstRC = &AArch64::FPR128RegClass;
  if (!emitScalarToVector(DstVec, DstTy, DstRC, I.getOperand(1).getReg(),
                          *I.getParent(), I.getIterator(), MRI))
    return false;

  unsigned DstSize = DstTy.getSizeInBits();

  // Keep track of the last MI we inserted. Later on, we might be able to save
  // a copy using it.
  MachineInstr *PrevMI = nullptr;
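  // Insert the remaining elements one lane at a time with INS, threading the
  // partially built vector through DstVec.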
  for (unsigned i = 2, e = DstSize / EltSize + 1; i < e; ++i) {
    unsigned InsDef;

    // Note that if we don't do a subregister copy, we end up making one more
    // of these than we need.
    InsDef = MRI.createVirtualRegister(DstRC);
    unsigned LaneIdx = i - 1;
    if (RB.getID() == AArch64::FPRRegBankID) {
      unsigned ImpDef = MRI.createVirtualRegister(DstRC);
      MachineInstr &ImpDefMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::IMPLICIT_DEF))
                                    .addDef(ImpDef);
      unsigned InsSubDef = MRI.createVirtualRegister(DstRC);
      MachineInstr &InsSubMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::INSERT_SUBREG))
                                    .addDef(InsSubDef)
                                    .addUse(ImpDef)
                                    .addUse(I.getOperand(i).getReg())
                                    .addImm(SubregIdx);
      MachineInstr &InsEltMI =
          *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
               .addDef(InsDef)
               .addUse(DstVec)
               .addImm(LaneIdx)
               .addUse(InsSubDef)
               .addImm(0);
      constrainSelectedInstRegOperands(ImpDefMI, TII, TRI, RBI);
      constrainSelectedInstRegOperands(InsSubMI, TII, TRI, RBI);
      constrainSelectedInstRegOperands(InsEltMI, TII, TRI, RBI);
      DstVec = InsDef;
      PrevMI = &InsEltMI;
    } else {
      MachineInstr &InsMI =
          *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
               .addDef(InsDef)
               .addUse(DstVec)
               .addImm(LaneIdx)
               .addUse(I.getOperand(i).getReg());
      constrainSelectedInstRegOperands(InsMI, TII, TRI, RBI);
      DstVec = InsDef;
      PrevMI = &InsMI;
    }
  }

  // If DstTy's size in bits is less than 128, then emit a subregister copy
  // from DstVec to the last register we've defined.
  if (DstSize < 128) {
    unsigned SubReg = 0;

    // Helper lambda to decide on a register class and subregister for the
    // subregister copy.
    auto GetRegInfoForCopy = [&SubReg,
                              &DstSize]() -> const TargetRegisterClass * {
      switch (DstSize) {
      default:
        LLVM_DEBUG(dbgs() << "Unknown destination size (" << DstSize << ")\n");
        return nullptr;
      case 32:
        SubReg = AArch64::ssub;
        return &AArch64::FPR32RegClass;
      case 64:
        SubReg = AArch64::dsub;
        return &AArch64::FPR64RegClass;
      }
    };

    const TargetRegisterClass *RC = GetRegInfoForCopy();
    if (!RC)
      return false;

    unsigned Reg = MRI.createVirtualRegister(RC);
    unsigned DstReg = I.getOperand(0).getReg();

    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
            DstReg)
        .addUse(DstVec, 0, SubReg);
    MachineOperand &RegOp = I.getOperand(1);
    RegOp.setReg(Reg);
    RBI.constrainGenericRegister(DstReg, *RC, MRI);
  } else {
    // We don't need a subregister copy. Save a copy by re-using the
    // destination register on the final insert.
    assert(PrevMI && "PrevMI was null?");
    PrevMI->getOperand(0).setReg(I.getOperand(0).getReg());
    constrainSelectedInstRegOperands(*PrevMI, TII, TRI, RBI);
  }

  I.eraseFromParent();
  return true;
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return renderers
/// that add the 12-bit value and the shifter operand; otherwise return None.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
  MachineInstr &MI = *Root.getParent();
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  uint64_t Immed;
  if (Root.isImm())
    Immed = Root.getImm();
  else if (Root.isCImm())
    Immed = Root.getCImm()->getZExtValue();
  else if (Root.isReg()) {
    MachineInstr *Def = MRI.getVRegDef(Root.getReg());
    if (Def->getOpcode() != TargetOpcode::G_CONSTANT)
      return None;
    MachineOperand &Op1 = Def->getOperand(1);
    if (!Op1.isCImm() || Op1.getCImm()->getBitWidth() > 64)
      return None;
    Immed = Op1.getCImm()->getZExtValue();
  } else
    return None;

  unsigned ShiftAmt;

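  // An arithmetic immediate is a 12-bit value optionally shifted left by 12:
  // e.g. 0x123 encodes as (0x123, LSL #0) and 0x123000 as (0x123, LSL #12).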
  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return None;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); },
  }};
}

/// Select a "register plus unscaled signed 9-bit immediate" address. This
/// should only match when there is an offset that is not valid for a scaled
/// immediate addressing mode. The "Size" argument is the size in bytes of the
/// memory reference, which is needed here to know what is valid for a scaled
/// immediate.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root,
                                                   unsigned Size) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  if (!Root.isReg())
    return None;

  if (!isBaseWithConstantOffset(Root, MRI))
    return None;

  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (!RootDef)
    return None;

  MachineOperand &OffImm = RootDef->getOperand(2);
  if (!OffImm.isReg())
    return None;
  MachineInstr *RHS = MRI.getVRegDef(OffImm.getReg());
  if (!RHS || RHS->getOpcode() != TargetOpcode::G_CONSTANT)
    return None;
  int64_t RHSC;
  MachineOperand &RHSOp1 = RHS->getOperand(1);
  if (!RHSOp1.isCImm() || RHSOp1.getCImm()->getBitWidth() > 64)
    return None;
  RHSC = RHSOp1.getCImm()->getSExtValue();

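  // The unscaled (LDUR/STUR) forms take a signed 9-bit byte offset, hence the
  // [-256, 255] range check below.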
  // If the offset is valid as a scaled immediate, don't match here.
  if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Log2_32(Size)))
    return None;
  if (RHSC >= -256 && RHSC < 256) {
    MachineOperand &Base = RootDef->getOperand(1);
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(Base); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
    }};
  }
  return None;
}

/// Select a "register plus scaled unsigned 12-bit immediate" address. The
/// "Size" argument is the size in bytes of the memory reference, which
/// determines the scale.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeIndexed(MachineOperand &Root,
                                                  unsigned Size) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  if (!Root.isReg())
    return None;

  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (!RootDef)
    return None;

  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
    if (LHSDef && RHSDef) {
      int64_t RHSC = (int64_t)RHSDef->getOperand(1).getCImm()->getZExtValue();
      unsigned Scale = Log2_32(Size);
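      // The scaled form encodes an unsigned 12-bit offset in units of Size
      // bytes, so the byte offset must be aligned and below 0x1000 << Scale.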
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
          return {{
              [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
              [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
          }};

        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
        }};
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (selectAddrModeUnscaled(Root, Size).hasValue())
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
  }};
}

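/// Custom renderer for imported patterns: materialize a G_CONSTANT's value as
/// a plain immediate operand on the selected instruction.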
void AArch64InstructionSelector::renderTruncImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
  Optional<int64_t> CstVal = getConstantVRegVal(MI.getOperand(0).getReg(), MRI);
  assert(CstVal && "Expected constant value");
  MIB.addImm(CstVal.getValue());
}

namespace llvm {
InstructionSelector *
createAArch64InstructionSelector(const AArch64TargetMachine &TM,
                                 AArch64Subtarget &Subtarget,
                                 AArch64RegisterBankInfo &RBI) {
  return new AArch64InstructionSelector(TM, Subtarget, RBI);
}
}
2217}