//===- X86InstructionSelector.cpp -----------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86RegisterBankInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>

#define DEBUG_TYPE "X86-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class X86InstructionSelector : public InstructionSelector {
public:
  X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
                         const X86RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // TODO: remove once supported by TableGen-erated instruction selection.
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          uint64_t Alignment) const;

  bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
                      MachineFunction &MF) const;
  bool selectTruncOrPtrToInt(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                 MachineFunction &MF) const;
  bool selectUadde(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                           MachineFunction &MF,
                           CodeGenCoverage &CoverageInfo) const;
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF,
                         CodeGenCoverage &CoverageInfo) const;
  bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
                        MachineFunction &MF) const;
  bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
                          const unsigned DstReg,
                          const TargetRegisterClass *DstRC,
                          const unsigned SrcReg,
                          const TargetRegisterClass *SrcRC) const;
  bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectShift(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;

  // Emit an insert-subreg instruction and insert it before MachineInstr &I.
  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  // Emit an extract-subreg instruction and insert it before MachineInstr &I.
  bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;

  const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
  const TargetRegisterClass *getRegClass(LLT Ty, unsigned Reg,
                                         MachineRegisterInfo &MRI) const;

  const X86TargetMachine &TM;
  const X86Subtarget &STI;
  const X86InstrInfo &TII;
  const X86RegisterInfo &TRI;
  const X86RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}

const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
                                    MachineRegisterInfo &MRI) const {
  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
  return getRegClass(Ty, RegBank);
}

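// Return the X86 sub-register index for the given general-purpose register
// class (sub_32bit/sub_16bit/sub_8bit), or NoSubRegister for any other class.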
static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }

  return SubIdx;
}

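// Map a general-purpose physical register to the register class of its width
// (GR64/GR32/GR16/GR8).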
static const TargetRegisterClass *getRegClassFromGRPhysReg(unsigned Reg) {
  assert(TargetRegisterInfo::isPhysicalRegister(Reg));
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}

// Set X86 Opcode and constrain DestReg.
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  unsigned DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering; perform an anyext.
        unsigned ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addImm(0)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    return true;
  }

  assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types; the number of
          // bits may not exactly match.
          (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
    // Change the physical register to perform the truncate.

    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                   << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

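// Main entry point: handle the few non-generic instructions we care about,
// then try the TableGen-erated selector, and finally fall back to the
// hand-written per-opcode selection routines below.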
bool X86InstructionSelector::select(MachineInstr &I,
                                    CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, CoverageInfo))
    return true;

  DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_GEP:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC:
    return selectTruncOrPtrToInt(I, MRI, MF);
  case TargetOpcode::G_INTTOPTR:
    return selectCopy(I, MRI);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
    return selectUadde(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF, CoverageInfo);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(I, MRI, MF, CoverageInfo);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    return selectShift(I, MRI, MF);
  }

  return false;
}

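// Return the X86 memory opcode for a G_LOAD/G_STORE of type Ty on register
// bank RB, taking the available subtarget features and the access alignment
// into account. Returns Opc unchanged if no suitable opcode is known.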
unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                uint64_t Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm
                                 : HasAVX ? X86::VMOVSSrm : X86::MOVSSrm)
                    : (HasAVX512 ? X86::VMOVSSZmr
                                 : HasAVX ? X86::VMOVSSmr : X86::MOVSSmr);
  } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm
                                 : HasAVX ? X86::VMOVSDrm : X86::MOVSDrm)
                    : (HasAVX512 ? X86::VMOVSDZmr
                                 : HasAVX ? X86::VMOVSDmr : X86::MOVSDmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= 16)
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVAPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVAPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVUPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVUPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= 32)
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                              : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                          : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                              : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                          : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                              : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                          : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                              : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                          : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= 64)
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}

// Fill in an address from the given instruction.
static void X86SelectAddress(const MachineInstr &I,
                             const MachineRegisterInfo &MRI,
                             X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  if (I.getOpcode() == TargetOpcode::G_GEP) {
    if (auto COff = getConstantVRegVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return;
      }
    }
  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return;
  }

  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
}

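// Select G_LOAD/G_STORE by rewriting the instruction into the matching X86
// memory-access opcode and folding the address computation into its operands.
// Atomic accesses are rejected here.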
bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  auto &MemOp = **I.memoperands_begin();
  if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
    DEBUG(dbgs() << "Atomic load/store not supported yet\n");
    return false;
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
  if (NewOpc == Opc)
    return false;

  X86AddressMode AM;
  X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  if (Opc == TargetOpcode::G_LOAD) {
    I.RemoveOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE (VAL, Addr), X86Store instruction (Addr, VAL)
    I.RemoveOperand(1);
    I.RemoveOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

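// Return the LEA opcode matching the given pointer type.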
static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
  if (Ty == LLT::pointer(0, 64))
    return X86::LEA64r;
  else if (Ty == LLT::pointer(0, 32))
    return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
  else
    llvm_unreachable("Can't get LEA opcode. Unsupported type.");
}

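// Select G_FRAME_INDEX and G_GEP by lowering them to an LEA of the computed
// address.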
bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_GEP) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to calculate frame index and GEP
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

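// Select G_GLOBAL_VALUE as an LEA of the global's address. TLS globals,
// non-small code models, and references that require an extra load or a PIC
// base register are not handled yet.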
bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  auto GV = I.getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    return false; // TODO: we don't support TLS yet.
  }

  // Can't handle alternate code models yet.
  if (TM.getCodeModel() != CodeModel::Small)
    return false;

  X86AddressMode AM;
  AM.GV = GV;
  AM.GVOpFlags = STI.classifyGlobalReference(GV);

  // TODO: The ABI requires an extra load; not supported yet.
  if (isGlobalStubReference(AM.GVOpFlags))
    return false;

  // TODO: This reference is relative to the PIC base; not supported yet.
  if (isGlobalRelativeToPICBase(AM.GVOpFlags))
    return false;

  if (STI.isPICStyleRIPRel()) {
    // Use rip-relative addressing.
    assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
    AM.Base.Reg = X86::RIP;
  }

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.RemoveOperand(1);
  addFullAddress(MIB, AM);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

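// Select G_CONSTANT on the GPR bank as a MOVri of the appropriate width.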
bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: in case isUInt<32>(Val), X86::MOV32ri can be used
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

// Helper function for selectTruncOrPtrToInt and selectAnyext.
// Returns true if DstRC is a scalar floating-point register class and
// SrcRC is a 128-bit vector register class.
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
                            const TargetRegisterClass *SrcRC) {
  return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
          DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
         (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
}

bool X86InstructionSelector::selectTurnIntoCOPY(
    MachineInstr &I, MachineRegisterInfo &MRI, const unsigned DstReg,
    const TargetRegisterClass *DstRC, const unsigned SrcReg,
    const TargetRegisterClass *SrcRC) const {

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                 << " operand\n");
    return false;
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

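// Select G_TRUNC and G_PTRTOINT. Truncations from a 128-bit vector class into
// a scalar floating-point class become plain copies; GPR truncations are
// expressed as sub-register copies.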
bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
          I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
         "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    DEBUG(dbgs() << TII.getName(I.getOpcode())
                 << " input/output on different banks\n");
    return false;
  }

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!DstRC || !SrcRC)
    return false;

  // If this is a truncation of a value that lives in a vector register class
  // into a floating-point class, just replace it with a copy: we can select
  // that as a regular move.
  if (canTurnIntoCOPY(DstRC, SrcRC))
    return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                 << "\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}

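// Select G_ZEXT. Widenings listed in OpTable are emitted as a MOVZX and/or a
// SUBREG_TO_REG; a zero-extension from s1 is emitted as an AND with 1.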
bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
         "8=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
         "16=>32 Zext is handled by tablegen");

  const static struct ZextEntry {
    LLT SrcTy;
    LLT DstTy;
    unsigned MovOp;
    bool NeedSubregToReg;
  } OpTable[] = {
      {LLT::scalar(8), LLT::scalar(16), X86::MOVZX16rr8, false},  // i8  => i16
      {LLT::scalar(8), LLT::scalar(64), X86::MOVZX32rr8, true},   // i8  => i64
      {LLT::scalar(16), LLT::scalar(64), X86::MOVZX32rr16, true}, // i16 => i64
      {LLT::scalar(32), LLT::scalar(64), 0, true}                 // i32 => i64
  };

  auto ZextEntryIt =
      std::find_if(std::begin(OpTable), std::end(OpTable),
                   [SrcTy, DstTy](const ZextEntry &El) {
                     return El.DstTy == DstTy && El.SrcTy == SrcTy;
                   });

  // Here we try to select Zext into a MOVZ and/or SUBREG_TO_REG instruction.
  if (ZextEntryIt != std::end(OpTable)) {
    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
    const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
    const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

    if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
        !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                   << " operand\n");
      return false;
    }

    unsigned TransitRegTo = DstReg;
    unsigned TransitRegFrom = SrcReg;
    if (ZextEntryIt->MovOp) {
      // If we select Zext into MOVZ + SUBREG_TO_REG, we need to have
      // a transit register in between: create it here.
      if (ZextEntryIt->NeedSubregToReg) {
        TransitRegFrom = MRI.createVirtualRegister(
            getRegClass(LLT::scalar(32), DstReg, MRI));
        TransitRegTo = TransitRegFrom;
      }

      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(ZextEntryIt->MovOp))
          .addDef(TransitRegTo)
          .addReg(SrcReg);
    }
    if (ZextEntryIt->NeedSubregToReg) {
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(TargetOpcode::SUBREG_TO_REG))
          .addDef(DstReg)
          .addImm(0)
          .addReg(TransitRegFrom)
          .addImm(X86::sub_32bit);
    }
    I.eraseFromParent();
    return true;
  }

  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri8;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri8;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri8;
  else
    return false;

  unsigned DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::SUBREG_TO_REG), DefReg)
        .addImm(0)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

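// Select G_ANYEXT either as a plain copy (same register class, or the
// FP-scalar-into-vector case) or as a SUBREG_TO_REG into the wider GPR class.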
bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If this is an ANY_EXT of a value that lives in a floating-point class
  // into a vector class, just replace it with a copy: we can select that as
  // a regular move.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                 << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}

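// Select G_ICMP as a CMPrr of the two operands followed by a SETcc of the
// result register.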
bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());
  unsigned OpSet = X86::getSETFromCond(CC);

  unsigned LHS = I.getOperand(2).getReg();
  unsigned RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMP8rr;
    break;
  case 16:
    OpCmp = X86::CMP16rr;
    break;
  case 32:
    OpCmp = X86::CMP32rr;
    break;
  case 64:
    OpCmp = X86::CMP64rr;
    break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                   TII.get(OpSet), I.getOperand(0).getReg());

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

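// Select G_UADDE as ADD32rr or ADC32rr, threading the carry-in/carry-out
// through EFLAGS. Only the s32 case is handled here.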
bool X86InstructionSelector::selectUadde(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned CarryOutReg = I.getOperand(1).getReg();
  const unsigned Op0Reg = I.getOperand(2).getReg();
  const unsigned Op1Reg = I.getOperand(3).getReg();
  unsigned CarryInReg = I.getOperand(4).getReg();

  const LLT DstTy = MRI.getType(DstReg);

  if (DstTy != LLT::scalar(32))
    return false;

  // find CarryIn def instruction.
  MachineInstr *Def = MRI.getVRegDef(CarryInReg);
  while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
    CarryInReg = Def->getOperand(1).getReg();
    Def = MRI.getVRegDef(CarryInReg);
  }

  unsigned Opcode;
  if (Def->getOpcode() == TargetOpcode::G_UADDE) {
    // carry set by prev ADD.

    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), X86::EFLAGS)
        .addReg(CarryInReg);

    if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI))
      return false;

    Opcode = X86::ADC32rr;
  } else if (auto val = getConstantVRegVal(CarryInReg, MRI)) {
    // carry is constant, support only 0.
    if (*val != 0)
      return false;

    Opcode = X86::ADD32rr;
  } else
    return false;

  MachineInstr &AddInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(AddInst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI))
    return false;

  I.eraseFromParent();
  return true;
}

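// Select G_EXTRACT of a subvector either as a sub-register copy (index 0) or
// as a VEXTRACT instruction.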
bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not extract subvector.

  if (Index == 0) {
    // Replace by extract subreg copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rr));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VEXTRACT immediate.
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, 0, SubIdx);

  return true;
}

bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}

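// Select G_INSERT of a subvector either as a sub-register copy (inserting at
// index 0 into an implicit def) or as a VINSERT instruction.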
bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not insert subvector.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace by subreg copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rr));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VINSERT immediate.
  Index = Index / InsertRegTy.getSizeInBits();

  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

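// Lower G_UNMERGE_VALUES into a sequence of G_EXTRACTs and select each of
// them.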
bool X86InstructionSelector::selectUnmergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
    CodeGenCoverage &CoverageInfo) const {
  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
         "unexpected instruction");

  // Split to extracts.
  unsigned NumDefs = I.getNumOperands() - 1;
  unsigned SrcReg = I.getOperand(NumDefs).getReg();
  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
    MachineInstr &ExtrInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
             .addReg(SrcReg)
             .addImm(Idx * DefSize);

    if (!select(ExtrInst, CoverageInfo))
      return false;
  }

  I.eraseFromParent();
  return true;
}

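// Lower G_MERGE_VALUES into a chain of G_INSERTs and select each of them.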
bool X86InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
    CodeGenCoverage &CoverageInfo) const {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES) &&
         "unexpected instruction");

  // Split to inserts.
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first src use insertSubReg.
  unsigned DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    unsigned Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst, CoverageInfo))
      return false;
  }

  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                                .addReg(DefReg);

  if (!select(CopyInst, CoverageInfo))
    return false;

  I.eraseFromParent();
  return true;
}

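// Select G_BRCOND as a TEST8ri of the condition register followed by a JNE_1
// to the destination block.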
bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

  MachineInstr &TestInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
           .addReg(CondReg)
           .addImm(1);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JNE_1))
      .addMBB(DestMBB);

  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

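// Select G_FCONSTANT by placing the value in the constant pool and loading it
// from there.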
bool X86InstructionSelector::materializeFP(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
         "unexpected instruction");

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return false;

  const unsigned DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  unsigned Align = DstTy.getSizeInBits();
  const DebugLoc &DbgLoc = I.getDebugLoc();

  unsigned Opc = getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Align);

  // Create the load from the constant pool.
  const ConstantFP *CFP = I.getOperand(1).getFPImm();
  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Align);
  MachineInstr *LoadInst = nullptr;
  unsigned char OpFlag = STI.classifyLocalReference(nullptr);

  if (CM == CodeModel::Large && STI.is64Bit()) {
    // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
    // they cannot be folded into immediate fields.

    unsigned AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
    BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
        MF.getDataLayout().getPointerSize(), Align);

    LoadInst =
        addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
                     AddrReg)
            .addMemOperand(MMO);

  } else if (CM == CodeModel::Small || !STI.is64Bit()) {
    // Handle the case when globals fit in our immediate field.
    // This is true for X86-32 always and X86-64 when in -mcmodel=small mode.

    // x86-32 PIC requires a PIC base register for constant pools.
    unsigned PICBase = 0;
    if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
      // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
      // In DAG ISel, the code that initializes it is generated by the CGBR
      // pass.
      return false; // TODO: support this mode.
    } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
      PICBase = X86::RIP;

    LoadInst = addConstantPoolReference(
        BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
        OpFlag);
  } else
    return false;

  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

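// Select G_IMPLICIT_DEF and G_PHI by constraining the destination register
// class and rewriting to the target IMPLICIT_DEF/PHI opcodes.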
bool X86InstructionSelector::selectImplicitDefOrPHI(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
          I.getOpcode() == TargetOpcode::G_PHI) &&
         "unexpected instruction");

  unsigned DstReg = I.getOperand(0).getReg();

  if (!MRI.getRegClassOrNull(DstReg)) {
    const LLT DstTy = MRI.getType(DstReg);
    const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                   << " operand\n");
      return false;
    }
  }

  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    I.setDesc(TII.get(X86::IMPLICIT_DEF));
  else
    I.setDesc(TII.get(X86::PHI));

  return true;
}

// Currently GlobalISel TableGen generates patterns for shift-by-immediate and
// shift-by-1, but with an i8 shift count. In G_LSHR/G_ASHR/G_SHL, as in
// LLVM IR, both arguments have the same type, so for now only i8 shifts can
// use the auto-generated TableGen patterns.
bool X86InstructionSelector::selectShift(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {

  assert((I.getOpcode() == TargetOpcode::G_SHL ||
          I.getOpcode() == TargetOpcode::G_ASHR ||
          I.getOpcode() == TargetOpcode::G_LSHR) &&
         "unexpected instruction");

  unsigned DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);

  const static struct ShiftEntry {
    unsigned SizeInBits;
    unsigned CReg;
    unsigned OpLSHR;
    unsigned OpASHR;
    unsigned OpSHL;
  } OpTable[] = {
      {8, X86::CL, X86::SHR8rCL, X86::SAR8rCL, X86::SHL8rCL},      // i8
      {16, X86::CX, X86::SHR16rCL, X86::SAR16rCL, X86::SHL16rCL},  // i16
      {32, X86::ECX, X86::SHR32rCL, X86::SAR32rCL, X86::SHL32rCL}, // i32
      {64, X86::RCX, X86::SHR64rCL, X86::SAR64rCL, X86::SHL64rCL}  // i64
  };

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  auto ShiftEntryIt = std::find_if(
      std::begin(OpTable), std::end(OpTable), [DstTy](const ShiftEntry &El) {
        return El.SizeInBits == DstTy.getSizeInBits();
      });
  if (ShiftEntryIt == std::end(OpTable))
    return false;

  unsigned CReg = ShiftEntryIt->CReg;
  unsigned Opcode = 0;
  switch (I.getOpcode()) {
  case TargetOpcode::G_SHL:
    Opcode = ShiftEntryIt->OpSHL;
    break;
  case TargetOpcode::G_ASHR:
    Opcode = ShiftEntryIt->OpASHR;
    break;
  case TargetOpcode::G_LSHR:
    Opcode = ShiftEntryIt->OpLSHR;
    break;
  default:
    return false;
  }

  unsigned Op0Reg = I.getOperand(1).getReg();
  unsigned Op1Reg = I.getOperand(2).getReg();

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
          ShiftEntryIt->CReg)
      .addReg(Op1Reg);

  // The shift instruction uses X86::CL. If we defined a super-register
  // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
  if (CReg != X86::CL)
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::KILL),
            X86::CL)
        .addReg(CReg, RegState::Kill);

  MachineInstr &ShiftInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg);

  constrainSelectedInstRegOperands(ShiftInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

InstructionSelector *
llvm::createX86InstructionSelector(const X86TargetMachine &TM,
                                   X86Subtarget &Subtarget,
                                   X86RegisterBankInfo &RBI) {
  return new X86InstructionSelector(TM, Subtarget, RBI);
}