//===- X86InstructionSelector.cpp -----------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86RegisterBankInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>

#define DEBUG_TYPE "X86-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class X86InstructionSelector : public InstructionSelector {
public:
  X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
                         const X86RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // TODO: remove once these are supported by Tablegen-erated instruction
  // selection.
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          uint64_t Alignment) const;

  bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
                      MachineFunction &MF) const;
  bool selectTruncOrPtrToInt(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                 MachineFunction &MF) const;
  bool selectFCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectUadde(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                           MachineFunction &MF,
                           CodeGenCoverage &CoverageInfo) const;
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF,
                         CodeGenCoverage &CoverageInfo) const;
  bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
                        MachineFunction &MF) const;
  bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
                          const unsigned DstReg,
                          const TargetRegisterClass *DstRC,
                          const unsigned SrcReg,
                          const TargetRegisterClass *SrcRC) const;
  bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectShift(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectSDiv(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;

  // Emit an insert-subreg instruction and insert it before MachineInstr &I.
  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  // Emit an extract-subreg instruction and insert it before MachineInstr &I.
  bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;

  const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
  const TargetRegisterClass *getRegClass(LLT Ty, unsigned Reg,
                                         MachineRegisterInfo &MRI) const;

  const X86TargetMachine &TM;
  const X86Subtarget &STI;
  const X86InstrInfo &TII;
  const X86RegisterInfo &TRI;
  const X86RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}

const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
                                    MachineRegisterInfo &MRI) const {
  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
  return getRegClass(Ty, RegBank);
}

static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }

  return SubIdx;
}

static const TargetRegisterClass *getRegClassFromGRPhysReg(unsigned Reg) {
  assert(TargetRegisterInfo::isPhysicalRegister(Reg));
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}

// Set X86 Opcode and constrain DestReg.
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  unsigned DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering; perform an anyext.
        unsigned ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addImm(0)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    return true;
  }

  assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types; the number of
          // bits may not exactly match.
          (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
    // Change the physical register to perform the truncate.

    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

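// Entry point for instruction selection. Non-generic instructions (mostly
// COPYs introduced by ABI lowering) are handled first, then the
// TableGen-erated selectImpl() is tried, and the hand-written per-opcode
// selectors below act as a fallback for operations TableGen does not cover
// yet.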
bool X86InstructionSelector::select(MachineInstr &I,
                                    CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, CoverageInfo))
    return true;

  LLVM_DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_GEP:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC:
    return selectTruncOrPtrToInt(I, MRI, MF);
  case TargetOpcode::G_INTTOPTR:
    return selectCopy(I, MRI);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_FCMP:
    return selectFCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
    return selectUadde(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF, CoverageInfo);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(I, MRI, MF, CoverageInfo);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    return selectShift(I, MRI, MF);
  case TargetOpcode::G_SDIV:
    return selectSDiv(I, MRI, MF);
  }

  return false;
}

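// Map a generic load/store to a concrete X86 MOV opcode, based on the value
// type, the register bank (GPR vs. vector), the access alignment, and the
// available vector extensions (AVX/AVX512/VLX). Returns the original generic
// opcode unchanged if no suitable instruction exists.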
unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                uint64_t Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm
                                 : HasAVX ? X86::VMOVSSrm : X86::MOVSSrm)
                    : (HasAVX512 ? X86::VMOVSSZmr
                                 : HasAVX ? X86::VMOVSSmr : X86::MOVSSmr);
  } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm
                                 : HasAVX ? X86::VMOVSDrm : X86::MOVSDrm)
                    : (HasAVX512 ? X86::VMOVSDZmr
                                 : HasAVX ? X86::VMOVSDmr : X86::MOVSDmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= 16)
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVAPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVAPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVUPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVUPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= 32)
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                              : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                          : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                              : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                          : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                              : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                          : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                              : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                          : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= 64)
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}

// Fill in an address from the given instruction.
static void X86SelectAddress(const MachineInstr &I,
                             const MachineRegisterInfo &MRI,
                             X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  if (I.getOpcode() == TargetOpcode::G_GEP) {
    if (auto COff = getConstantVRegVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return;
      }
    }
  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return;
  }

  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
}

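// Select G_LOAD/G_STORE in place: pick a concrete MOV opcode via
// getLoadStoreOp(), then fold the address-producing instruction (G_GEP or
// G_FRAME_INDEX) into the memory operands using X86SelectAddress(). Atomic
// accesses are rejected for now.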
bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  auto &MemOp = **I.memoperands_begin();
  if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
    LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
    return false;
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
  if (NewOpc == Opc)
    return false;

  X86AddressMode AM;
  X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  if (Opc == TargetOpcode::G_LOAD) {
    I.RemoveOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE (VAL, Addr), X86Store instruction (Addr, VAL)
    I.RemoveOperand(1);
    I.RemoveOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
  if (Ty == LLT::pointer(0, 64))
    return X86::LEA64r;
  else if (Ty == LLT::pointer(0, 32))
    return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
  else
    llvm_unreachable("Can't get LEA opcode. Unsupported type.");
}

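// Select G_FRAME_INDEX and G_GEP by rewriting them into an LEA: a frame index
// becomes the base of the address with a zero offset, while for G_GEP the
// pointer operand becomes the base and the offset operand becomes the index
// register (scale 1, zero displacement).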
bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_GEP) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to calculate frame index and GEP
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  auto GV = I.getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    return false; // TODO: we don't support TLS yet.
  }

  // Can't handle alternate code models yet.
  if (TM.getCodeModel() != CodeModel::Small)
    return false;

  X86AddressMode AM;
  AM.GV = GV;
  AM.GVOpFlags = STI.classifyGlobalReference(GV);

  // TODO: The ABI requires an extra load. Not supported yet.
  if (isGlobalStubReference(AM.GVOpFlags))
    return false;

  // TODO: This reference is relative to the PIC base. Not supported yet.
  if (isGlobalRelativeToPICBase(AM.GVOpFlags))
    return false;

  if (STI.isPICStyleRIPRel()) {
    // Use rip-relative addressing.
    assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
    AM.Base.Reg = X86::RIP;
  }

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.RemoveOperand(1);
  addFullAddress(MIB, AM);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

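// Materialize a G_CONSTANT assigned to the GPR bank as a MOVri of the
// appropriate width; 64-bit values that fit in a signed 32-bit immediate use
// MOV64ri32.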
bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: in case isUInt<32>(Val), X86::MOV32ri can be used
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

// Helper function for selectTruncOrPtrToInt and selectAnyext.
// Returns true if DstRC lives on a floating register class and
// SrcRC lives on a 128-bit vector class.
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
                            const TargetRegisterClass *SrcRC) {
  return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
          DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
         (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
}

bool X86InstructionSelector::selectTurnIntoCOPY(
    MachineInstr &I, MachineRegisterInfo &MRI, const unsigned DstReg,
    const TargetRegisterClass *DstRC, const unsigned SrcReg,
    const TargetRegisterClass *SrcRC) const {

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

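// Select G_TRUNC and G_PTRTOINT. Within the GPR bank both reduce to a
// subregister COPY: the source class is narrowed to one that has the
// destination-sized subregister and the instruction is rewritten as a COPY.
// Vector-to-scalar-FP cases recognized by canTurnIntoCOPY() are also emitted
// as plain copies.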
bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
          I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
         "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
                      << " input/output on different banks\n");
    return false;
  }

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!DstRC || !SrcRC)
    return false;

  // If this is a truncation of a value that lives in a vector register class
  // and goes into a floating-point register class, just replace it with a
  // copy, as we are able to select it as a regular move.
  if (canTurnIntoCOPY(DstRC, SrcRC))
    return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << "\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}

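// Select G_ZEXT. Extensions listed in OpTable below become a MOVZX and/or a
// SUBREG_TO_REG (the latter implicitly zeroing the upper 32 bits of a 64-bit
// destination); zero-extension from s1 is implemented by masking the low bit
// with an AND-immediate of 1.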
bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
         "8=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
         "16=>32 Zext is handled by tablegen");

  const static struct ZextEntry {
    LLT SrcTy;
    LLT DstTy;
    unsigned MovOp;
    bool NeedSubregToReg;
  } OpTable[] = {
      {LLT::scalar(8), LLT::scalar(16), X86::MOVZX16rr8, false},  // i8  => i16
      {LLT::scalar(8), LLT::scalar(64), X86::MOVZX32rr8, true},   // i8  => i64
      {LLT::scalar(16), LLT::scalar(64), X86::MOVZX32rr16, true}, // i16 => i64
      {LLT::scalar(32), LLT::scalar(64), 0, true}                 // i32 => i64
  };

  auto ZextEntryIt =
      std::find_if(std::begin(OpTable), std::end(OpTable),
                   [SrcTy, DstTy](const ZextEntry &El) {
                     return El.DstTy == DstTy && El.SrcTy == SrcTy;
                   });

  // Here we try to select Zext into a MOVZ and/or SUBREG_TO_REG instruction.
  if (ZextEntryIt != std::end(OpTable)) {
    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
    const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
    const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

    if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
        !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }

    unsigned TransitRegTo = DstReg;
    unsigned TransitRegFrom = SrcReg;
    if (ZextEntryIt->MovOp) {
      // If we select Zext into MOVZ + SUBREG_TO_REG, we need to have
      // a transit register in between: create it here.
      if (ZextEntryIt->NeedSubregToReg) {
        TransitRegFrom = MRI.createVirtualRegister(
            getRegClass(LLT::scalar(32), DstReg, MRI));
        TransitRegTo = TransitRegFrom;
      }

      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(ZextEntryIt->MovOp))
          .addDef(TransitRegTo)
          .addReg(SrcReg);
    }
    if (ZextEntryIt->NeedSubregToReg) {
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(TargetOpcode::SUBREG_TO_REG))
          .addDef(DstReg)
          .addImm(0)
          .addReg(TransitRegFrom)
          .addImm(X86::sub_32bit);
    }
    I.eraseFromParent();
    return true;
  }

  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri8;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri8;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri8;
  else
    return false;

  unsigned DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::SUBREG_TO_REG), DefReg)
        .addImm(0)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If this is an ANY_EXT of a value that lives in a floating-point register
  // class and goes into a vector register class, just replace it with a copy,
  // as we are able to select it as a regular move.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}

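// Select G_ICMP as an integer CMPrr of the (possibly swapped) operands
// followed by a SETcc into the 8-bit result register.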
bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());
  unsigned OpSet = X86::getSETFromCond(CC);

  unsigned LHS = I.getOperand(2).getReg();
  unsigned RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMP8rr;
    break;
  case 16:
    OpCmp = X86::CMP16rr;
    break;
  case 32:
    OpCmp = X86::CMP32rr;
    break;
  case 64:
    OpCmp = X86::CMP64rr;
    break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                   TII.get(OpSet), I.getOperand(0).getReg());

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

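// Select G_FCMP as a UCOMISS/UCOMISD compare followed by SETcc. FCMP_OEQ and
// FCMP_UNE need two flag checks each (e.g. SETE and SETNP combined with an
// AND for OEQ), so they are expanded through SETFOpcTable below.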
bool X86InstructionSelector::selectFCmp(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");

  unsigned LhsReg = I.getOperand(2).getReg();
  unsigned RhsReg = I.getOperand(3).getReg();
  CmpInst::Predicate Predicate =
      (CmpInst::Predicate)I.getOperand(1).getPredicate();

  // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
  static const uint16_t SETFOpcTable[2][3] = {
      {X86::SETEr, X86::SETNPr, X86::AND8rr},
      {X86::SETNEr, X86::SETPr, X86::OR8rr}};
  const uint16_t *SETFOpc = nullptr;
  switch (Predicate) {
  default:
    break;
  case CmpInst::FCMP_OEQ:
    SETFOpc = &SETFOpcTable[0][0];
    break;
  case CmpInst::FCMP_UNE:
    SETFOpc = &SETFOpcTable[1][0];
    break;
  }

  // Compute the opcode for the CMP instruction.
  unsigned OpCmp;
  LLT Ty = MRI.getType(LhsReg);
  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 32:
    OpCmp = X86::UCOMISSrr;
    break;
  case 64:
    OpCmp = X86::UCOMISDrr;
    break;
  }

  unsigned ResultReg = I.getOperand(0).getReg();
  RBI.constrainGenericRegister(
      ResultReg,
      *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
  if (SETFOpc) {
    MachineInstr &CmpInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
             .addReg(LhsReg)
             .addReg(RhsReg);

    unsigned FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
    unsigned FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
    MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[0]), FlagReg1);
    MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[1]), FlagReg2);
    MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[2]), ResultReg)
                              .addReg(FlagReg1)
                              .addReg(FlagReg2);
    constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set1, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set2, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set3, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
  assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
  unsigned Opc = X86::getSETFromCond(CC);

  if (SwapArgs)
    std::swap(LhsReg, RhsReg);

  // Emit a compare of LHS/RHS.
  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LhsReg)
           .addReg(RhsReg);

  MachineInstr &Set =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc), ResultReg);
  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(Set, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

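// Select G_UADDE (32-bit only). A carry-in produced by a previous G_UADDE is
// copied back into EFLAGS and an ADC32rr is emitted; a constant-zero carry-in
// degenerates to ADD32rr. The carry-out is produced by copying EFLAGS into
// the carry-out register.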
bool X86InstructionSelector::selectUadde(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned CarryOutReg = I.getOperand(1).getReg();
  const unsigned Op0Reg = I.getOperand(2).getReg();
  const unsigned Op1Reg = I.getOperand(3).getReg();
  unsigned CarryInReg = I.getOperand(4).getReg();

  const LLT DstTy = MRI.getType(DstReg);

  if (DstTy != LLT::scalar(32))
    return false;

  // Find the instruction that defines the carry-in, looking through G_TRUNCs.
  MachineInstr *Def = MRI.getVRegDef(CarryInReg);
  while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
    CarryInReg = Def->getOperand(1).getReg();
    Def = MRI.getVRegDef(CarryInReg);
  }

  unsigned Opcode;
  if (Def->getOpcode() == TargetOpcode::G_UADDE) {
    // The carry was set by a previous ADD.

    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), X86::EFLAGS)
        .addReg(CarryInReg);

    if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI))
      return false;

    Opcode = X86::ADC32rr;
  } else if (auto val = getConstantVRegVal(CarryInReg, MRI)) {
    // The carry-in is a constant; only 0 is supported.
    if (*val != 0)
      return false;

    Opcode = X86::ADD32rr;
  } else
    return false;

  MachineInstr &AddInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(AddInst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI))
    return false;

  I.eraseFromParent();
  return true;
}

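// Select G_EXTRACT of a subvector. An extraction at index 0 becomes a
// subregister copy; other 128/256-bit subvector extractions map to VEXTRACT*
// instructions, with the bit index converted to the element-group immediate.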
bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // Meanwhile, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not a subvector extract.

  if (Index == 0) {
    // Replace by an extract-subreg copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rr));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VEXTRACT immediate.
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, 0, SubIdx);

  return true;
}

bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}

bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // Meanwhile, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not a subvector insert.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace by a subreg copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rr));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VINSERT immediate.
  Index = Index / InsertRegTy.getSizeInBits();

  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

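// Select G_UNMERGE_VALUES by splitting it into one G_EXTRACT per destination
// at the corresponding bit offset and recursively selecting each extract.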
bool X86InstructionSelector::selectUnmergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
    CodeGenCoverage &CoverageInfo) const {
  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
         "unexpected instruction");

  // Split to extracts.
  unsigned NumDefs = I.getNumOperands() - 1;
  unsigned SrcReg = I.getOperand(NumDefs).getReg();
  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
    MachineInstr &ExtrInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
             .addReg(SrcReg)
             .addImm(Idx * DefSize);

    if (!select(ExtrInst, CoverageInfo))
      return false;
  }

  I.eraseFromParent();
  return true;
}

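// Select G_MERGE_VALUES by building the destination incrementally: the first
// source is placed with a subregister copy (emitInsertSubreg), every remaining
// source is chained in through a G_INSERT at bit offset (Idx - 1) * SrcSize,
// and each newly created instruction is fed back through select().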
bool X86InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
    CodeGenCoverage &CoverageInfo) const {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES) &&
         "unexpected instruction");

  // Split to inserts.
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first source, use emitInsertSubreg (a plain subregister copy).
  unsigned DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    unsigned Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst, CoverageInfo))
      return false;
  }

  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                               .addReg(DefReg);

  if (!select(CopyInst, CoverageInfo))
    return false;

  I.eraseFromParent();
  return true;
}

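// Select G_BRCOND as TEST8ri $cond, 1 followed by JNE_1: branch to the
// destination block when the low bit of the i1 condition register is set.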
bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

  MachineInstr &TestInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
           .addReg(CondReg)
           .addImm(1);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JNE_1))
      .addMBB(DestMBB);

  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

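// Materialize G_FCONSTANT by placing the FP immediate in the constant pool and
// selecting a load from it. The addressing form depends on the code model:
// under the large 64-bit model the pool address is first moved into a GR64
// with MOV64ri, while under the small model (or on 32-bit targets) the
// constant-pool reference is folded directly into the load, RIP-relative where
// applicable.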
bool X86InstructionSelector::materializeFP(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
         "unexpected instruction");

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return false;

  const unsigned DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  unsigned Align = DstTy.getSizeInBits();
  const DebugLoc &DbgLoc = I.getDebugLoc();

  unsigned Opc = getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Align);

  // Create the load from the constant pool.
  const ConstantFP *CFP = I.getOperand(1).getFPImm();
  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Align);
  MachineInstr *LoadInst = nullptr;
  unsigned char OpFlag = STI.classifyLocalReference(nullptr);

  if (CM == CodeModel::Large && STI.is64Bit()) {
    // Under the X86-64 non-small code model, GVs (and friends) are 64 bits, so
    // they cannot be folded into immediate fields.

    unsigned AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
    BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
        MF.getDataLayout().getPointerSize(), Align);

    LoadInst =
        addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
                     AddrReg)
            .addMemOperand(MMO);

  } else if (CM == CodeModel::Small || !STI.is64Bit()) {
    // Handle the case when globals fit in our immediate field.
    // This is true for X86-32 always and for X86-64 in -mcmodel=small mode.

    // x86-32 PIC requires a PIC base register for constant pools.
    unsigned PICBase = 0;
    if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
      // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
      // In DAGISel, the code that initializes it is generated by the CGBR
      // pass.
      return false; // TODO: support this mode.
    } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
      PICBase = X86::RIP;

    LoadInst = addConstantPoolReference(
        BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
        OpFlag);
  } else
    return false;

  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

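// Select G_IMPLICIT_DEF and G_PHI by constraining the destination to a
// register class (if it does not already have one) and swapping in the target
// IMPLICIT_DEF/PHI opcodes.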
bool X86InstructionSelector::selectImplicitDefOrPHI(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
          I.getOpcode() == TargetOpcode::G_PHI) &&
         "unexpected instruction");

  unsigned DstReg = I.getOperand(0).getReg();

  if (!MRI.getRegClassOrNull(DstReg)) {
    const LLT DstTy = MRI.getType(DstReg);
    const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }

  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    I.setDesc(TII.get(X86::IMPLICIT_DEF));
  else
    I.setDesc(TII.get(X86::PHI));

  return true;
}

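// Select G_SHL/G_ASHR/G_LSHR of i8/i16/i32/i64 on the GPR bank by copying the
// shift amount into CL (through the width-matched CL/CX/ECX/RCX register) and
// emitting the corresponding shift-by-CL instruction. Roughly, a 32-bit G_LSHR
// becomes
//   $ecx = COPY %amount
//   $cl = KILL killed $ecx
//   %dst = SHR32rCL %src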
// Currently GlobalISel TableGen generates patterns for shift-by-immediate and
// shift-by-1, but only with an i8 shift count. In G_LSHR/G_ASHR/G_SHL, as in
// LLVM IR, both operands have the same type, so for now only i8 shifts can use
// the auto-generated TableGen patterns.
bool X86InstructionSelector::selectShift(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_SHL ||
          I.getOpcode() == TargetOpcode::G_ASHR ||
          I.getOpcode() == TargetOpcode::G_LSHR) &&
         "unexpected instruction");

  unsigned DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);

  const static struct ShiftEntry {
    unsigned SizeInBits;
    unsigned CReg;
    unsigned OpLSHR;
    unsigned OpASHR;
    unsigned OpSHL;
  } OpTable[] = {
      {8, X86::CL, X86::SHR8rCL, X86::SAR8rCL, X86::SHL8rCL},      // i8
      {16, X86::CX, X86::SHR16rCL, X86::SAR16rCL, X86::SHL16rCL},  // i16
      {32, X86::ECX, X86::SHR32rCL, X86::SAR32rCL, X86::SHL32rCL}, // i32
      {64, X86::RCX, X86::SHR64rCL, X86::SAR64rCL, X86::SHL64rCL}  // i64
  };

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  auto ShiftEntryIt = std::find_if(
      std::begin(OpTable), std::end(OpTable), [DstTy](const ShiftEntry &El) {
        return El.SizeInBits == DstTy.getSizeInBits();
      });
  if (ShiftEntryIt == std::end(OpTable))
    return false;

  unsigned CReg = ShiftEntryIt->CReg;
  unsigned Opcode = 0;
  switch (I.getOpcode()) {
  case TargetOpcode::G_SHL:
    Opcode = ShiftEntryIt->OpSHL;
    break;
  case TargetOpcode::G_ASHR:
    Opcode = ShiftEntryIt->OpASHR;
    break;
  case TargetOpcode::G_LSHR:
    Opcode = ShiftEntryIt->OpLSHR;
    break;
  default:
    return false;
  }

  unsigned Op0Reg = I.getOperand(1).getReg();
  unsigned Op1Reg = I.getOperand(2).getReg();

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
          ShiftEntryIt->CReg)
      .addReg(Op1Reg);

  // The shift instruction uses X86::CL. If we defined a super-register
  // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
  if (CReg != X86::CL)
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::KILL),
            X86::CL)
        .addReg(CReg, RegState::Kill);

  MachineInstr &ShiftInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg);

  constrainSelectedInstRegOperands(ShiftInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

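// Select G_SDIV on the GPR bank by expanding to the classic x86 idiom: copy
// (or, for i8, sign-extend) the dividend into AL/AX/EAX/RAX, sign-extend into
// the high half with CWD/CDQ/CQO where needed, issue IDIV with the divisor,
// and copy the quotient out of the low register.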
bool X86InstructionSelector::selectSDiv(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert(I.getOpcode() == TargetOpcode::G_SDIV && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned DividendReg = I.getOperand(1).getReg();
  const unsigned DivisorReg = I.getOperand(2).getReg();

  const LLT RegTy = MRI.getType(DstReg);
  assert(RegTy == MRI.getType(DividendReg) &&
         RegTy == MRI.getType(DivisorReg) &&
         "Arguments and return value types must match");

  const RegisterBank &RegRB = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the X86 IDIV instruction, in most cases the dividend
  // (numerator) must be in a specific register pair highreg:lowreg,
  // producing the quotient in lowreg and the remainder in highreg.
  // For most data types, to set up the instruction, the dividend is
  // copied into lowreg, and lowreg is sign-extended into highreg. The
  // exception is i8, where the dividend is defined as a single register rather
  // than a register pair, and we therefore directly sign-extend the dividend
  // into lowreg, instead of copying, and ignore the highreg.
  const static struct SDivEntry {
    unsigned SizeInBits;
    unsigned QuotientReg;
    unsigned DividendRegUpper;
    unsigned DividendRegLower;
    unsigned OpSignExtend;
    unsigned OpCopy;
    unsigned OpDiv;
  } OpTable[] = {
      {8, X86::AL, X86::NoRegister, X86::AX, 0, X86::MOVSX16rr8,
       X86::IDIV8r}, // i8
      {16, X86::AX, X86::DX, X86::AX, X86::CWD, TargetOpcode::COPY,
       X86::IDIV16r}, // i16
      {32, X86::EAX, X86::EDX, X86::EAX, X86::CDQ, TargetOpcode::COPY,
       X86::IDIV32r}, // i32
      {64, X86::RAX, X86::RDX, X86::RAX, X86::CQO, TargetOpcode::COPY,
       X86::IDIV64r} // i64
  };

  if (RegRB.getID() != X86::GPRRegBankID)
    return false;

  auto SDivEntryIt = std::find_if(
      std::begin(OpTable), std::end(OpTable), [RegTy](const SDivEntry &El) {
        return El.SizeInBits == RegTy.getSizeInBits();
      });

  if (SDivEntryIt == std::end(OpTable))
    return false;

  const TargetRegisterClass *RegRC = getRegClass(RegTy, RegRB);
  if (!RBI.constrainGenericRegister(DividendReg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DivisorReg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SDivEntryIt->OpCopy),
          SDivEntryIt->DividendRegLower)
      .addReg(DividendReg);
  if (SDivEntryIt->DividendRegUpper != X86::NoRegister)
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(SDivEntryIt->OpSignExtend));
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SDivEntryIt->OpDiv))
      .addReg(DivisorReg);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
          DstReg)
      .addReg(SDivEntryIt->QuotientReg);

  I.eraseFromParent();
  return true;
}

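// Factory function used by the X86 backend to create this instruction
// selector for the GlobalISel pipeline.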
InstructionSelector *
llvm::createX86InstructionSelector(const X86TargetMachine &TM,
                                   X86Subtarget &Subtarget,
                                   X86RegisterBankInfo &RBI) {
  return new X86InstructionSelector(TM, Subtarget, RBI);
}