blob: 63f59cfd00308ee0f1f451489bb7a70083ec504a [file] [log] [blame]
//===- AArch64InstructionSelector.cpp ----------------------------*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//
14
15#include "AArch64InstructionSelector.h"
16#include "AArch64InstrInfo.h"
17#include "AArch64RegisterBankInfo.h"
18#include "AArch64RegisterInfo.h"
19#include "AArch64Subtarget.h"
Tim Northoverbdf16242016-10-10 21:50:00 +000020#include "AArch64TargetMachine.h"
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +000021#include "llvm/CodeGen/MachineBasicBlock.h"
22#include "llvm/CodeGen/MachineFunction.h"
23#include "llvm/CodeGen/MachineInstr.h"
24#include "llvm/CodeGen/MachineInstrBuilder.h"
25#include "llvm/CodeGen/MachineRegisterInfo.h"
26#include "llvm/IR/Type.h"
27#include "llvm/Support/Debug.h"
28#include "llvm/Support/raw_ostream.h"
29
30#define DEBUG_TYPE "aarch64-isel"
31
32using namespace llvm;
33
34#ifndef LLVM_BUILD_GLOBAL_ISEL
35#error "You shouldn't build this"
36#endif
37
38AArch64InstructionSelector::AArch64InstructionSelector(
Tim Northoverbdf16242016-10-10 21:50:00 +000039 const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
40 const AArch64RegisterBankInfo &RBI)
41 : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +000042 TRI(*STI.getRegisterInfo()), RBI(RBI) {}
43
Ahmed Bougacha59e160a2016-08-16 14:37:40 +000044/// Check whether \p I is a currently unsupported binary operation:
45/// - it has an unsized type
46/// - an operand is not a vreg
47/// - all operands are not in the same bank
48/// These are checks that should someday live in the verifier, but right now,
49/// these are mostly limitations of the aarch64 selector.
50static bool unsupportedBinOp(const MachineInstr &I,
51 const AArch64RegisterBankInfo &RBI,
52 const MachineRegisterInfo &MRI,
53 const AArch64RegisterInfo &TRI) {
Tim Northover0f140c72016-09-09 11:46:34 +000054 LLT Ty = MRI.getType(I.getOperand(0).getReg());
Tim Northover32a078a2016-09-15 10:09:59 +000055 if (!Ty.isValid()) {
56 DEBUG(dbgs() << "Generic binop register should be typed\n");
Ahmed Bougacha59e160a2016-08-16 14:37:40 +000057 return true;
58 }
59
60 const RegisterBank *PrevOpBank = nullptr;
61 for (auto &MO : I.operands()) {
62 // FIXME: Support non-register operands.
63 if (!MO.isReg()) {
64 DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
65 return true;
66 }
67
68 // FIXME: Can generic operations have physical registers operands? If
69 // so, this will need to be taught about that, and we'll need to get the
70 // bank out of the minimal class for the register.
71 // Either way, this needs to be documented (and possibly verified).
72 if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
73 DEBUG(dbgs() << "Generic inst has physical register operand\n");
74 return true;
75 }
76
77 const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
78 if (!OpBank) {
79 DEBUG(dbgs() << "Generic register has no bank or class\n");
80 return true;
81 }
82
83 if (PrevOpBank && OpBank != PrevOpBank) {
84 DEBUG(dbgs() << "Generic inst operands have different banks\n");
85 return true;
86 }
87 PrevOpBank = OpBank;
88 }
89 return false;
90}
91
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +000092/// Select the AArch64 opcode for the basic binary operation \p GenericOpc
93/// (such as G_OR or G_ADD), appropriate for the register bank \p RegBankID
94/// and of size \p OpSize.
95/// \returns \p GenericOpc if the combination is unsupported.
96static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
97 unsigned OpSize) {
98 switch (RegBankID) {
99 case AArch64::GPRRegBankID:
100 switch (OpSize) {
101 case 32:
102 switch (GenericOpc) {
103 case TargetOpcode::G_OR:
104 return AArch64::ORRWrr;
Ahmed Bougacha6db3cfe2016-07-29 16:56:25 +0000105 case TargetOpcode::G_XOR:
106 return AArch64::EORWrr;
Ahmed Bougacha61a79282016-07-28 16:58:31 +0000107 case TargetOpcode::G_AND:
108 return AArch64::ANDWrr;
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000109 case TargetOpcode::G_ADD:
110 return AArch64::ADDWrr;
Ahmed Bougachad7748d62016-07-28 16:58:35 +0000111 case TargetOpcode::G_SUB:
112 return AArch64::SUBWrr;
Ahmed Bougacha2ac5bf92016-08-16 14:02:47 +0000113 case TargetOpcode::G_SHL:
114 return AArch64::LSLVWr;
115 case TargetOpcode::G_LSHR:
116 return AArch64::LSRVWr;
117 case TargetOpcode::G_ASHR:
118 return AArch64::ASRVWr;
Ahmed Bougacha1d0560b2016-08-18 15:17:13 +0000119 case TargetOpcode::G_SDIV:
120 return AArch64::SDIVWr;
121 case TargetOpcode::G_UDIV:
122 return AArch64::UDIVWr;
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000123 default:
124 return GenericOpc;
125 }
126 case 64:
127 switch (GenericOpc) {
128 case TargetOpcode::G_OR:
129 return AArch64::ORRXrr;
Ahmed Bougacha6db3cfe2016-07-29 16:56:25 +0000130 case TargetOpcode::G_XOR:
131 return AArch64::EORXrr;
Ahmed Bougacha61a79282016-07-28 16:58:31 +0000132 case TargetOpcode::G_AND:
133 return AArch64::ANDXrr;
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000134 case TargetOpcode::G_ADD:
Tim Northover2fda4b02016-10-10 21:49:49 +0000135 case TargetOpcode::G_GEP:
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000136 return AArch64::ADDXrr;
Ahmed Bougachad7748d62016-07-28 16:58:35 +0000137 case TargetOpcode::G_SUB:
138 return AArch64::SUBXrr;
Ahmed Bougacha2ac5bf92016-08-16 14:02:47 +0000139 case TargetOpcode::G_SHL:
140 return AArch64::LSLVXr;
141 case TargetOpcode::G_LSHR:
142 return AArch64::LSRVXr;
143 case TargetOpcode::G_ASHR:
144 return AArch64::ASRVXr;
Ahmed Bougacha1d0560b2016-08-18 15:17:13 +0000145 case TargetOpcode::G_SDIV:
146 return AArch64::SDIVXr;
147 case TargetOpcode::G_UDIV:
148 return AArch64::UDIVXr;
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000149 default:
150 return GenericOpc;
151 }
152 }
Ahmed Bougacha33e19fe2016-08-18 16:05:11 +0000153 case AArch64::FPRRegBankID:
154 switch (OpSize) {
155 case 32:
156 switch (GenericOpc) {
157 case TargetOpcode::G_FADD:
158 return AArch64::FADDSrr;
159 case TargetOpcode::G_FSUB:
160 return AArch64::FSUBSrr;
161 case TargetOpcode::G_FMUL:
162 return AArch64::FMULSrr;
163 case TargetOpcode::G_FDIV:
164 return AArch64::FDIVSrr;
165 default:
166 return GenericOpc;
167 }
168 case 64:
169 switch (GenericOpc) {
170 case TargetOpcode::G_FADD:
171 return AArch64::FADDDrr;
172 case TargetOpcode::G_FSUB:
173 return AArch64::FSUBDrr;
174 case TargetOpcode::G_FMUL:
175 return AArch64::FMULDrr;
176 case TargetOpcode::G_FDIV:
177 return AArch64::FDIVDrr;
Quentin Colombet0e531272016-10-11 00:21:11 +0000178 case TargetOpcode::G_OR:
179 return AArch64::ORRv8i8;
Ahmed Bougacha33e19fe2016-08-18 16:05:11 +0000180 default:
181 return GenericOpc;
182 }
183 }
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000184 };
185 return GenericOpc;
186}
187
Ahmed Bougacha7adfac52016-07-29 16:56:16 +0000188/// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc,
189/// appropriate for the (value) register bank \p RegBankID and of memory access
190/// size \p OpSize. This returns the variant with the base+unsigned-immediate
191/// addressing mode (e.g., LDRXui).
192/// \returns \p GenericOpc if the combination is unsupported.
193static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
194 unsigned OpSize) {
195 const bool isStore = GenericOpc == TargetOpcode::G_STORE;
196 switch (RegBankID) {
197 case AArch64::GPRRegBankID:
198 switch (OpSize) {
199 case 32:
200 return isStore ? AArch64::STRWui : AArch64::LDRWui;
201 case 64:
202 return isStore ? AArch64::STRXui : AArch64::LDRXui;
203 }
Quentin Colombetd2623f8e2016-10-11 00:21:14 +0000204 case AArch64::FPRRegBankID:
205 switch (OpSize) {
206 case 32:
207 return isStore ? AArch64::STRSui : AArch64::LDRSui;
208 case 64:
209 return isStore ? AArch64::STRDui : AArch64::LDRDui;
210 }
Ahmed Bougacha7adfac52016-07-29 16:56:16 +0000211 };
212 return GenericOpc;
213}
214
Quentin Colombetcb629a82016-10-12 03:57:49 +0000215static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
216 MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
217 const RegisterBankInfo &RBI) {
218
219 unsigned DstReg = I.getOperand(0).getReg();
220 if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
221 assert(I.isCopy() && "Generic operators do not allow physical registers");
222 return true;
223 }
224
225 const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
226 const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
227 unsigned SrcReg = I.getOperand(1).getReg();
228 const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
229 (void)SrcSize;
230 assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
231 "No phys reg on generic operators");
232 assert(
233 (DstSize == SrcSize ||
234 // Copies are a mean to setup initial types, the number of
235 // bits may not exactly match.
236 (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
237 DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI)) ||
238 // Copies are a mean to copy bits around, as long as we are
239 // on the same register class, that's fine. Otherwise, that
240 // means we need some SUBREG_TO_REG or AND & co.
241 (((DstSize + 31) / 32 == (SrcSize + 31) / 32) && DstSize > SrcSize)) &&
242 "Copy with different width?!");
243 assert((DstSize <= 64 || RegBank.getID() == AArch64::FPRRegBankID) &&
244 "GPRs cannot get more than 64-bit width values");
245 const TargetRegisterClass *RC = nullptr;
246
247 if (RegBank.getID() == AArch64::FPRRegBankID) {
248 if (DstSize <= 32)
249 RC = &AArch64::FPR32RegClass;
250 else if (DstSize <= 64)
251 RC = &AArch64::FPR64RegClass;
252 else if (DstSize <= 128)
253 RC = &AArch64::FPR128RegClass;
254 else {
255 DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n');
256 return false;
257 }
258 } else {
259 assert(RegBank.getID() == AArch64::GPRRegBankID &&
260 "Bitcast for the flags?");
261 RC =
262 DstSize <= 32 ? &AArch64::GPR32allRegClass : &AArch64::GPR64allRegClass;
263 }
264
265 // No need to constrain SrcReg. It will get constrained when
266 // we hit another of its use or its defs.
267 // Copies do not have constraints.
268 if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
269 DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
270 << " operand\n");
271 return false;
272 }
273 I.setDesc(TII.get(AArch64::COPY));
274 return true;
275}
276
Tim Northover6c02ad52016-10-12 22:49:04 +0000277static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
278 switch (P) {
279 default:
280 llvm_unreachable("Unknown condition code!");
281 case CmpInst::ICMP_NE:
282 return AArch64CC::NE;
283 case CmpInst::ICMP_EQ:
284 return AArch64CC::EQ;
285 case CmpInst::ICMP_SGT:
286 return AArch64CC::GT;
287 case CmpInst::ICMP_SGE:
288 return AArch64CC::GE;
289 case CmpInst::ICMP_SLT:
290 return AArch64CC::LT;
291 case CmpInst::ICMP_SLE:
292 return AArch64CC::LE;
293 case CmpInst::ICMP_UGT:
294 return AArch64CC::HI;
295 case CmpInst::ICMP_UGE:
296 return AArch64CC::HS;
297 case CmpInst::ICMP_ULT:
298 return AArch64CC::LO;
299 case CmpInst::ICMP_ULE:
300 return AArch64CC::LS;
301 }
302}
303
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000304bool AArch64InstructionSelector::select(MachineInstr &I) const {
305 assert(I.getParent() && "Instruction should be in a basic block!");
306 assert(I.getParent()->getParent() && "Instruction should be in a function!");
307
308 MachineBasicBlock &MBB = *I.getParent();
309 MachineFunction &MF = *MBB.getParent();
310 MachineRegisterInfo &MRI = MF.getRegInfo();
311
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000312 if (!isPreISelGenericOpcode(I.getOpcode()))
Quentin Colombetcb629a82016-10-12 03:57:49 +0000313 return !I.isCopy() || selectCopy(I, TII, MRI, TRI, RBI);
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000314
315 if (I.getNumOperands() != I.getNumExplicitOperands()) {
316 DEBUG(dbgs() << "Generic instruction has unexpected implicit operands\n");
317 return false;
318 }
319
Tim Northover32a078a2016-09-15 10:09:59 +0000320 LLT Ty =
321 I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{};
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000322
Ahmed Bougacha85505092016-07-28 17:15:15 +0000323 switch (I.getOpcode()) {
324 case TargetOpcode::G_BR: {
325 I.setDesc(TII.get(AArch64::B));
Ahmed Bougacha85505092016-07-28 17:15:15 +0000326 return true;
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000327 }
328
Tim Northover5e3dbf32016-10-12 22:49:01 +0000329 case TargetOpcode::G_BRCOND: {
330 if (Ty.getSizeInBits() > 32) {
331 // We shouldn't need this on AArch64, but it would be implemented as an
332 // EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the
333 // bit being tested is < 32.
334 DEBUG(dbgs() << "G_BRCOND has type: " << Ty
335 << ", expected at most 32-bits");
336 return false;
337 }
338
339 const unsigned CondReg = I.getOperand(0).getReg();
340 MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
341
342 auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
343 .addUse(CondReg)
344 .addImm(/*bit offset=*/0)
345 .addMBB(DestMBB);
346
347 I.eraseFromParent();
348 return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
349 }
350
Tim Northover4edc60d2016-10-10 21:49:42 +0000351 case TargetOpcode::G_CONSTANT: {
352 if (Ty.getSizeInBits() <= 32)
353 I.setDesc(TII.get(AArch64::MOVi32imm));
354 else if (Ty.getSizeInBits() <= 64)
355 I.setDesc(TII.get(AArch64::MOVi64imm));
356 else
357 return false;
358 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
359 }
360
Ahmed Bougacha0306b5e2016-08-16 14:02:42 +0000361 case TargetOpcode::G_FRAME_INDEX: {
362 // allocas and G_FRAME_INDEX are only supported in addrspace(0).
Tim Northover5ae83502016-09-15 09:20:34 +0000363 if (Ty != LLT::pointer(0, 64)) {
Tim Northover0f140c72016-09-09 11:46:34 +0000364 DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
Tim Northover5ae83502016-09-15 09:20:34 +0000365 << ", expected: " << LLT::pointer(0, 64) << '\n');
Ahmed Bougacha0306b5e2016-08-16 14:02:42 +0000366 return false;
367 }
368
369 I.setDesc(TII.get(AArch64::ADDXri));
Ahmed Bougacha0306b5e2016-08-16 14:02:42 +0000370
371 // MOs for a #0 shifted immediate.
372 I.addOperand(MachineOperand::CreateImm(0));
373 I.addOperand(MachineOperand::CreateImm(0));
374
375 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
376 }
Tim Northoverbdf16242016-10-10 21:50:00 +0000377
378 case TargetOpcode::G_GLOBAL_VALUE: {
379 auto GV = I.getOperand(1).getGlobal();
380 if (GV->isThreadLocal()) {
381 // FIXME: we don't support TLS yet.
382 return false;
383 }
384 unsigned char OpFlags = STI.ClassifyGlobalReference(GV, TM);
385 if (OpFlags & AArch64II::MO_GOT)
386 I.setDesc(TII.get(AArch64::LOADgot));
387 else {
388 I.setDesc(TII.get(AArch64::MOVaddr));
389 I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
390 MachineInstrBuilder MIB(MF, I);
391 MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(),
392 OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
393 }
394 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
395 }
396
Ahmed Bougacha7adfac52016-07-29 16:56:16 +0000397 case TargetOpcode::G_LOAD:
398 case TargetOpcode::G_STORE: {
Tim Northover0f140c72016-09-09 11:46:34 +0000399 LLT MemTy = Ty;
400 LLT PtrTy = MRI.getType(I.getOperand(1).getReg());
Ahmed Bougacha7adfac52016-07-29 16:56:16 +0000401
Tim Northover5ae83502016-09-15 09:20:34 +0000402 if (PtrTy != LLT::pointer(0, 64)) {
Ahmed Bougacha7adfac52016-07-29 16:56:16 +0000403 DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
Tim Northover5ae83502016-09-15 09:20:34 +0000404 << ", expected: " << LLT::pointer(0, 64) << '\n');
Ahmed Bougacha7adfac52016-07-29 16:56:16 +0000405 return false;
406 }
407
408#ifndef NDEBUG
409 // Sanity-check the pointer register.
410 const unsigned PtrReg = I.getOperand(1).getReg();
411 const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
412 assert(PtrRB.getID() == AArch64::GPRRegBankID &&
413 "Load/Store pointer operand isn't a GPR");
Tim Northover0f140c72016-09-09 11:46:34 +0000414 assert(MRI.getType(PtrReg).isPointer() &&
415 "Load/Store pointer operand isn't a pointer");
Ahmed Bougacha7adfac52016-07-29 16:56:16 +0000416#endif
417
418 const unsigned ValReg = I.getOperand(0).getReg();
419 const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);
420
421 const unsigned NewOpc =
422 selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemTy.getSizeInBits());
423 if (NewOpc == I.getOpcode())
424 return false;
425
426 I.setDesc(TII.get(NewOpc));
Ahmed Bougacha7adfac52016-07-29 16:56:16 +0000427
428 I.addOperand(MachineOperand::CreateImm(0));
429 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
430 }
431
Ahmed Bougachae4c03ab2016-08-16 14:37:46 +0000432 case TargetOpcode::G_MUL: {
433 // Reject the various things we don't support yet.
434 if (unsupportedBinOp(I, RBI, MRI, TRI))
435 return false;
436
437 const unsigned DefReg = I.getOperand(0).getReg();
438 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
439
440 if (RB.getID() != AArch64::GPRRegBankID) {
441 DEBUG(dbgs() << "G_MUL on bank: " << RB << ", expected: GPR\n");
442 return false;
443 }
444
445 unsigned ZeroReg;
446 unsigned NewOpc;
447 if (Ty == LLT::scalar(32)) {
448 NewOpc = AArch64::MADDWrrr;
449 ZeroReg = AArch64::WZR;
450 } else if (Ty == LLT::scalar(64)) {
451 NewOpc = AArch64::MADDXrrr;
452 ZeroReg = AArch64::XZR;
453 } else {
454 DEBUG(dbgs() << "G_MUL has type: " << Ty << ", expected: "
455 << LLT::scalar(32) << " or " << LLT::scalar(64) << '\n');
456 return false;
457 }
458
459 I.setDesc(TII.get(NewOpc));
Ahmed Bougachae4c03ab2016-08-16 14:37:46 +0000460
461 I.addOperand(MachineOperand::CreateReg(ZeroReg, /*isDef=*/false));
462
463 // Now that we selected an opcode, we need to constrain the register
464 // operands to use appropriate classes.
465 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
466 }
467
Ahmed Bougacha33e19fe2016-08-18 16:05:11 +0000468 case TargetOpcode::G_FADD:
469 case TargetOpcode::G_FSUB:
470 case TargetOpcode::G_FMUL:
471 case TargetOpcode::G_FDIV:
472
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000473 case TargetOpcode::G_OR:
Ahmed Bougacha6db3cfe2016-07-29 16:56:25 +0000474 case TargetOpcode::G_XOR:
Ahmed Bougacha61a79282016-07-28 16:58:31 +0000475 case TargetOpcode::G_AND:
Ahmed Bougacha2ac5bf92016-08-16 14:02:47 +0000476 case TargetOpcode::G_SHL:
477 case TargetOpcode::G_LSHR:
478 case TargetOpcode::G_ASHR:
Ahmed Bougacha1d0560b2016-08-18 15:17:13 +0000479 case TargetOpcode::G_SDIV:
480 case TargetOpcode::G_UDIV:
Ahmed Bougachad7748d62016-07-28 16:58:35 +0000481 case TargetOpcode::G_ADD:
Tim Northover2fda4b02016-10-10 21:49:49 +0000482 case TargetOpcode::G_SUB:
483 case TargetOpcode::G_GEP: {
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000484 // Reject the various things we don't support yet.
Ahmed Bougacha59e160a2016-08-16 14:37:40 +0000485 if (unsupportedBinOp(I, RBI, MRI, TRI))
486 return false;
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000487
Ahmed Bougacha59e160a2016-08-16 14:37:40 +0000488 const unsigned OpSize = Ty.getSizeInBits();
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000489
490 const unsigned DefReg = I.getOperand(0).getReg();
491 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
492
493 const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
494 if (NewOpc == I.getOpcode())
495 return false;
496
497 I.setDesc(TII.get(NewOpc));
498 // FIXME: Should the type be always reset in setDesc?
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000499
500 // Now that we selected an opcode, we need to constrain the register
501 // operands to use appropriate classes.
502 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
503 }
Tim Northover3d38b3a2016-10-11 20:50:21 +0000504
505 case TargetOpcode::G_ANYEXT: {
506 const unsigned DstReg = I.getOperand(0).getReg();
507 const unsigned SrcReg = I.getOperand(1).getReg();
508
Quentin Colombetcb629a82016-10-12 03:57:49 +0000509 const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
510 if (RBDst.getID() != AArch64::GPRRegBankID) {
511 DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst << ", expected: GPR\n");
512 return false;
513 }
Tim Northover3d38b3a2016-10-11 20:50:21 +0000514
Quentin Colombetcb629a82016-10-12 03:57:49 +0000515 const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
516 if (RBSrc.getID() != AArch64::GPRRegBankID) {
517 DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc << ", expected: GPR\n");
Tim Northover3d38b3a2016-10-11 20:50:21 +0000518 return false;
519 }
520
521 const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
522
523 if (DstSize == 0) {
524 DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
525 return false;
526 }
527
Quentin Colombetcb629a82016-10-12 03:57:49 +0000528 if (DstSize != 64 && DstSize > 32) {
Tim Northover3d38b3a2016-10-11 20:50:21 +0000529 DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
530 << ", expected: 32 or 64\n");
531 return false;
532 }
Quentin Colombetcb629a82016-10-12 03:57:49 +0000533 // At this point G_ANYEXT is just like a plain COPY, but we need
534 // to explicitly form the 64-bit value if any.
535 if (DstSize > 32) {
536 unsigned ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
537 BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
538 .addDef(ExtSrc)
539 .addImm(0)
540 .addUse(SrcReg)
541 .addImm(AArch64::sub_32);
542 I.getOperand(1).setReg(ExtSrc);
Tim Northover3d38b3a2016-10-11 20:50:21 +0000543 }
Quentin Colombetcb629a82016-10-12 03:57:49 +0000544 return selectCopy(I, TII, MRI, TRI, RBI);
Tim Northover3d38b3a2016-10-11 20:50:21 +0000545 }
546
547 case TargetOpcode::G_ZEXT:
548 case TargetOpcode::G_SEXT: {
549 unsigned Opcode = I.getOpcode();
550 const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
551 SrcTy = MRI.getType(I.getOperand(1).getReg());
552 const bool isSigned = Opcode == TargetOpcode::G_SEXT;
553 const unsigned DefReg = I.getOperand(0).getReg();
554 const unsigned SrcReg = I.getOperand(1).getReg();
555 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
556
557 if (RB.getID() != AArch64::GPRRegBankID) {
558 DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
559 << ", expected: GPR\n");
560 return false;
561 }
562
563 MachineInstr *ExtI;
564 if (DstTy == LLT::scalar(64)) {
565 // FIXME: Can we avoid manually doing this?
566 if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
567 DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
568 << " operand\n");
569 return false;
570 }
571
572 const unsigned SrcXReg =
573 MRI.createVirtualRegister(&AArch64::GPR64RegClass);
574 BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
575 .addDef(SrcXReg)
576 .addImm(0)
577 .addUse(SrcReg)
578 .addImm(AArch64::sub_32);
579
580 const unsigned NewOpc = isSigned ? AArch64::SBFMXri : AArch64::UBFMXri;
581 ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
582 .addDef(DefReg)
583 .addUse(SrcXReg)
584 .addImm(0)
585 .addImm(SrcTy.getSizeInBits() - 1);
586 } else if (DstTy == LLT::scalar(32)) {
587 const unsigned NewOpc = isSigned ? AArch64::SBFMWri : AArch64::UBFMWri;
588 ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
589 .addDef(DefReg)
590 .addUse(SrcReg)
591 .addImm(0)
592 .addImm(SrcTy.getSizeInBits() - 1);
593 } else {
594 return false;
595 }
596
597 constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
598
599 I.eraseFromParent();
600 return true;
601 }
Tim Northoverc1d8c2b2016-10-11 22:29:23 +0000602
603 case TargetOpcode::G_INTTOPTR:
604 case TargetOpcode::G_PTRTOINT:
Quentin Colombet9de30fa2016-10-12 03:57:52 +0000605 case TargetOpcode::G_BITCAST:
606 return selectCopy(I, TII, MRI, TRI, RBI);
Tim Northover6c02ad52016-10-12 22:49:04 +0000607
608 case TargetOpcode::G_ICMP: {
609 if (Ty != LLT::scalar(1)) {
610 DEBUG(dbgs() << "G_ICMP result has type: " << Ty
611 << ", expected: " << LLT::scalar(1) << '\n');
612 return false;
613 }
614
615 unsigned CmpOpc = 0;
616 unsigned ZReg = 0;
617
618 LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
619 if (CmpTy == LLT::scalar(32)) {
620 CmpOpc = AArch64::SUBSWrr;
621 ZReg = AArch64::WZR;
622 } else if (CmpTy == LLT::scalar(64) || CmpTy.isPointer()) {
623 CmpOpc = AArch64::SUBSXrr;
624 ZReg = AArch64::XZR;
625 } else {
626 return false;
627 }
628
629 const AArch64CC::CondCode CC = changeICMPPredToAArch64CC(
630 (CmpInst::Predicate)I.getOperand(1).getPredicate());
631
632 MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
633 .addDef(ZReg)
634 .addUse(I.getOperand(2).getReg())
635 .addUse(I.getOperand(3).getReg());
636
637 MachineInstr &CSetMI =
638 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
639 .addDef(I.getOperand(0).getReg())
640 .addUse(AArch64::WZR)
641 .addUse(AArch64::WZR)
642 .addImm(CC);
643
644 constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
645 constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);
646
647 I.eraseFromParent();
648 return true;
649 }
650
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000651 }
652
653 return false;
654}