//===- X86InstructionSelector.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86RegisterBankInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>

#define DEBUG_TYPE "X86-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class X86InstructionSelector : public InstructionSelector {
public:
  X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
                         const X86RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // TODO: remove after supported by Tablegen-erated instruction selection.
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          uint64_t Alignment) const;

  bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
                      MachineFunction &MF) const;
  bool selectTruncOrPtrToInt(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                 MachineFunction &MF) const;
  bool selectFCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectUadde(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                           MachineFunction &MF,
                           CodeGenCoverage &CoverageInfo) const;
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF,
                         CodeGenCoverage &CoverageInfo) const;
  bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
                        MachineFunction &MF) const;
  bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
                          const unsigned DstReg,
                          const TargetRegisterClass *DstRC,
                          const unsigned SrcReg,
                          const TargetRegisterClass *SrcRC) const;
  bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectShift(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectDivRem(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectIntrinsicWSideEffects(MachineInstr &I, MachineRegisterInfo &MRI,
                                   MachineFunction &MF) const;

  // emit insert subreg instruction and insert it before MachineInstr &I
  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  // emit extract subreg instruction and insert it before MachineInstr &I
  bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;

  const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
  const TargetRegisterClass *getRegClass(LLT Ty, unsigned Reg,
                                         MachineRegisterInfo &MRI) const;

  const X86TargetMachine &TM;
  const X86Subtarget &STI;
  const X86InstrInfo &TII;
  const X86RegisterInfo &TRI;
  const X86RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}

const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
                                    MachineRegisterInfo &MRI) const {
  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
  return getRegClass(Ty, RegBank);
}

static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }

  return SubIdx;
}

static const TargetRegisterClass *getRegClassFromGRPhysReg(unsigned Reg) {
  assert(TargetRegisterInfo::isPhysicalRegister(Reg));
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}

// Set X86 Opcode and constrain DestReg.
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  unsigned DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering; perform an anyext.
        unsigned ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addImm(0)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    return true;
  }

  assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types; the number of
          // bits may not exactly match.
          (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
    // Change the physical register to perform a truncate.

    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::select(MachineInstr &I,
                                    CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, CoverageInfo))
    return true;

  LLVM_DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_GEP:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC:
    return selectTruncOrPtrToInt(I, MRI, MF);
  case TargetOpcode::G_INTTOPTR:
    return selectCopy(I, MRI);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_FCMP:
    return selectFCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
    return selectUadde(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF, CoverageInfo);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectMergeValues(I, MRI, MF, CoverageInfo);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    return selectShift(I, MRI, MF);
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM:
    return selectDivRem(I, MRI, MF);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWSideEffects(I, MRI, MF);
  }

  return false;
}

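// Pick the concrete x86 load/store opcode for the given type, register bank
// and alignment. If no suitable opcode is known, the generic opcode is
// returned unchanged and the caller bails out.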
unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                uint64_t Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm
                                 : HasAVX ? X86::VMOVSSrm : X86::MOVSSrm)
                    : (HasAVX512 ? X86::VMOVSSZmr
                                 : HasAVX ? X86::VMOVSSmr : X86::MOVSSmr);
  } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm
                                 : HasAVX ? X86::VMOVSDrm : X86::MOVSDrm)
                    : (HasAVX512 ? X86::VMOVSDZmr
                                 : HasAVX ? X86::VMOVSDmr : X86::MOVSDmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= 16)
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVAPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVAPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVUPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVUPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= 32)
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                              : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                          : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                              : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                          : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                              : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                          : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                              : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                          : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= 64)
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}

// Fill in an address from the given instruction.
static void X86SelectAddress(const MachineInstr &I,
                             const MachineRegisterInfo &MRI,
                             X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  if (I.getOpcode() == TargetOpcode::G_GEP) {
    if (auto COff = getConstantVRegVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return;
      }
    }
  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return;
  }

  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
}

bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  auto &MemOp = **I.memoperands_begin();
  if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
    LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
    return false;
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
  if (NewOpc == Opc)
    return false;

  X86AddressMode AM;
  X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  if (Opc == TargetOpcode::G_LOAD) {
    I.RemoveOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE (VAL, Addr), X86Store instruction (Addr, VAL)
    I.RemoveOperand(1);
    I.RemoveOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
  if (Ty == LLT::pointer(0, 64))
    return X86::LEA64r;
  else if (Ty == LLT::pointer(0, 32))
    return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
  else
    llvm_unreachable("Can't get LEA opcode. Unsupported type.");
}

bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_GEP) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to calculate frame index and GEP
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  auto GV = I.getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    return false; // TODO: we don't support TLS yet.
  }

  // Can't handle alternate code models yet.
  if (TM.getCodeModel() != CodeModel::Small)
    return false;

  X86AddressMode AM;
  AM.GV = GV;
  AM.GVOpFlags = STI.classifyGlobalReference(GV);

  // TODO: The ABI requires an extra load; not supported yet.
  if (isGlobalStubReference(AM.GVOpFlags))
    return false;

  // TODO: This reference is relative to the PIC base; not supported yet.
  if (isGlobalRelativeToPICBase(AM.GVOpFlags))
    return false;

  if (STI.isPICStyleRIPRel()) {
    // Use rip-relative addressing.
    assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
    AM.Base.Reg = X86::RIP;
  }

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.RemoveOperand(1);
  addFullAddress(MIB, AM);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: if isUInt<32>(Val), X86::MOV32ri can be used.
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

// Helper function for selectTruncOrPtrToInt and selectAnyext.
// Returns true if DstRC lives on a floating register class and
// SrcRC lives on a 128-bit vector class.
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
                            const TargetRegisterClass *SrcRC) {
  return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
          DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
         (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
}

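// Replace I with a plain COPY once both registers have been constrained to
// the given register classes.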
bool X86InstructionSelector::selectTurnIntoCOPY(
    MachineInstr &I, MachineRegisterInfo &MRI, const unsigned DstReg,
    const TargetRegisterClass *DstRC, const unsigned SrcReg,
    const TargetRegisterClass *SrcRC) const {

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
          I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
         "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
                      << " input/output on different banks\n");
    return false;
  }

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!DstRC || !SrcRC)
    return false;

  // If this is a truncation of a value that lives in a vector class and goes
  // into a floating-point class, just replace it with a copy, as we are able
  // to select it as a regular move.
  if (canTurnIntoCOPY(DstRC, SrcRC))
    return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done.
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << "\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
         "8=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
         "16=>32 Zext is handled by tablegen");

  const static struct ZextEntry {
    LLT SrcTy;
    LLT DstTy;
    unsigned MovOp;
    bool NeedSubregToReg;
  } OpTable[] = {
      {LLT::scalar(8), LLT::scalar(16), X86::MOVZX16rr8, false},  // i8  => i16
      {LLT::scalar(8), LLT::scalar(64), X86::MOVZX32rr8, true},   // i8  => i64
      {LLT::scalar(16), LLT::scalar(64), X86::MOVZX32rr16, true}, // i16 => i64
      {LLT::scalar(32), LLT::scalar(64), 0, true}                 // i32 => i64
  };

  auto ZextEntryIt =
      std::find_if(std::begin(OpTable), std::end(OpTable),
                   [SrcTy, DstTy](const ZextEntry &El) {
                     return El.DstTy == DstTy && El.SrcTy == SrcTy;
                   });

  // Here we try to select Zext into a MOVZ and/or SUBREG_TO_REG instruction.
  if (ZextEntryIt != std::end(OpTable)) {
    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
    const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
    const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

    if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
        !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }

    unsigned TransitRegTo = DstReg;
    unsigned TransitRegFrom = SrcReg;
    if (ZextEntryIt->MovOp) {
      // If we select Zext into MOVZ + SUBREG_TO_REG, we need to have
      // a transit register in between: create it here.
      if (ZextEntryIt->NeedSubregToReg) {
        TransitRegFrom = MRI.createVirtualRegister(
            getRegClass(LLT::scalar(32), DstReg, MRI));
        TransitRegTo = TransitRegFrom;
      }

      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(ZextEntryIt->MovOp))
          .addDef(TransitRegTo)
          .addReg(SrcReg);
    }
    if (ZextEntryIt->NeedSubregToReg) {
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(TargetOpcode::SUBREG_TO_REG))
          .addDef(DstReg)
          .addImm(0)
          .addReg(TransitRegFrom)
          .addImm(X86::sub_32bit);
    }
    I.eraseFromParent();
    return true;
  }

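  // The remaining case is a zero-extension from s1: select it as an AND with
  // 1, first widening the value through SUBREG_TO_REG when the destination is
  // wider than 8 bits.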
  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri8;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri8;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri8;
  else
    return false;

  unsigned DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::SUBREG_TO_REG), DefReg)
        .addImm(0)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If this is an ANY_EXT of a value that lives in a floating-point class and
  // goes into a vector class, just replace it with a copy, as we are able to
  // select it as a regular move.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());
  unsigned OpSet = X86::getSETFromCond(CC);

  unsigned LHS = I.getOperand(2).getReg();
  unsigned RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMP8rr;
    break;
  case 16:
    OpCmp = X86::CMP16rr;
    break;
  case 32:
    OpCmp = X86::CMP32rr;
    break;
  case 64:
    OpCmp = X86::CMP64rr;
    break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                   TII.get(OpSet), I.getOperand(0).getReg());

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectFCmp(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");

  unsigned LhsReg = I.getOperand(2).getReg();
  unsigned RhsReg = I.getOperand(3).getReg();
  CmpInst::Predicate Predicate =
      (CmpInst::Predicate)I.getOperand(1).getPredicate();

  // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
  static const uint16_t SETFOpcTable[2][3] = {
      {X86::SETEr, X86::SETNPr, X86::AND8rr},
      {X86::SETNEr, X86::SETPr, X86::OR8rr}};
  const uint16_t *SETFOpc = nullptr;
  switch (Predicate) {
  default:
    break;
  case CmpInst::FCMP_OEQ:
    SETFOpc = &SETFOpcTable[0][0];
    break;
  case CmpInst::FCMP_UNE:
    SETFOpc = &SETFOpcTable[1][0];
    break;
  }

  // Compute the opcode for the CMP instruction.
  unsigned OpCmp;
  LLT Ty = MRI.getType(LhsReg);
  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 32:
    OpCmp = X86::UCOMISSrr;
    break;
  case 64:
    OpCmp = X86::UCOMISDrr;
    break;
  }

  unsigned ResultReg = I.getOperand(0).getReg();
  RBI.constrainGenericRegister(
      ResultReg,
      *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
  if (SETFOpc) {
    MachineInstr &CmpInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
             .addReg(LhsReg)
             .addReg(RhsReg);

    unsigned FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
    unsigned FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
    MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[0]), FlagReg1);
    MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[1]), FlagReg2);
    MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[2]), ResultReg)
                              .addReg(FlagReg1)
                              .addReg(FlagReg2);
    constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set1, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set2, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set3, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
  assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
  unsigned Opc = X86::getSETFromCond(CC);

  if (SwapArgs)
    std::swap(LhsReg, RhsReg);

  // Emit a compare of LHS/RHS.
  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LhsReg)
           .addReg(RhsReg);

  MachineInstr &Set =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc), ResultReg);
  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(Set, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

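// Select G_UADDE (add with carry). The incoming carry must either be produced
// by a previous G_UADDE, in which case it is routed through EFLAGS, or be the
// constant 0.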
bool X86InstructionSelector::selectUadde(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned CarryOutReg = I.getOperand(1).getReg();
  const unsigned Op0Reg = I.getOperand(2).getReg();
  const unsigned Op1Reg = I.getOperand(3).getReg();
  unsigned CarryInReg = I.getOperand(4).getReg();

  const LLT DstTy = MRI.getType(DstReg);

  if (DstTy != LLT::scalar(32))
    return false;

  // Find the def instruction for CarryIn.
  MachineInstr *Def = MRI.getVRegDef(CarryInReg);
  while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
    CarryInReg = Def->getOperand(1).getReg();
    Def = MRI.getVRegDef(CarryInReg);
  }

  unsigned Opcode;
  if (Def->getOpcode() == TargetOpcode::G_UADDE) {
    // The carry was set by a previous ADD.

    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), X86::EFLAGS)
        .addReg(CarryInReg);

    if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI))
      return false;

    Opcode = X86::ADC32rr;
  } else if (auto val = getConstantVRegVal(CarryInReg, MRI)) {
    // The carry is a constant; only 0 is supported.
    if (*val != 0)
      return false;

    Opcode = X86::ADD32rr;
  } else
    return false;

  MachineInstr &AddInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(AddInst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI))
    return false;

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not a subvector extract.

  if (Index == 0) {
    // Replace by extract subreg copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rr));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VEXTRACT immediate.
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

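// Emit a COPY of the sub_xmm/sub_ymm subregister of SrcReg into DstReg,
// inserted before I.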
bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, 0, SubIdx);

  return true;
}

bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}

bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not a subvector insert.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace by subreg copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rr));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VINSERT immediate.
  Index = Index / InsertRegTy.getSizeInBits();

  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
1322
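// Lower G_UNMERGE_VALUES into one G_EXTRACT per result, each reading the
// source register at a bit offset of Idx * DefSize, then select the newly
// created instructions.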
Daniel Sandersf76f3152017-11-16 00:46:35 +00001323bool X86InstructionSelector::selectUnmergeValues(
1324 MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
1325 CodeGenCoverage &CoverageInfo) const {
Igor Breger06335bb2017-09-17 14:02:19 +00001326 assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
1327 "unexpected instruction");
Igor Bregerb186a692017-07-02 08:15:49 +00001328
1329  // Split into a sequence of G_EXTRACTs.
1330 unsigned NumDefs = I.getNumOperands() - 1;
1331 unsigned SrcReg = I.getOperand(NumDefs).getReg();
1332 unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
1333
1334 for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
Igor Bregerb186a692017-07-02 08:15:49 +00001335 MachineInstr &ExtrInst =
1336 *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1337 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
1338 .addReg(SrcReg)
1339 .addImm(Idx * DefSize);
1340
Daniel Sandersf76f3152017-11-16 00:46:35 +00001341 if (!select(ExtrInst, CoverageInfo))
Igor Bregerb186a692017-07-02 08:15:49 +00001342 return false;
1343 }
1344
1345 I.eraseFromParent();
1346 return true;
1347}
1348
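// Lower G_MERGE_VALUES / G_CONCAT_VECTORS into a chain of G_INSERTs: the
// first source is placed with a subregister copy and each following source is
// inserted at a bit offset of (Idx - 1) * SrcSize, selecting every new
// instruction as it is created.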
Daniel Sandersf76f3152017-11-16 00:46:35 +00001349bool X86InstructionSelector::selectMergeValues(
1350 MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
1351 CodeGenCoverage &CoverageInfo) const {
Amara Emerson5ec14602018-12-10 18:44:58 +00001352 assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
1353 I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
Igor Breger06335bb2017-09-17 14:02:19 +00001354 "unexpected instruction");
Igor Breger0cddd342017-06-29 12:08:28 +00001355
1356  // Split into a sequence of G_INSERTs.
1357 unsigned DstReg = I.getOperand(0).getReg();
1358 unsigned SrcReg0 = I.getOperand(1).getReg();
1359
1360 const LLT DstTy = MRI.getType(DstReg);
1361 const LLT SrcTy = MRI.getType(SrcReg0);
1362 unsigned SrcSize = SrcTy.getSizeInBits();
1363
1364 const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
1365
1366  // For the first source, use emitInsertSubreg.
1367 unsigned DefReg = MRI.createGenericVirtualRegister(DstTy);
1368 MRI.setRegBank(DefReg, RegBank);
1369 if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
1370 return false;
1371
1372 for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
Igor Breger0cddd342017-06-29 12:08:28 +00001373 unsigned Tmp = MRI.createGenericVirtualRegister(DstTy);
1374 MRI.setRegBank(Tmp, RegBank);
1375
1376 MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1377 TII.get(TargetOpcode::G_INSERT), Tmp)
1378 .addReg(DefReg)
1379 .addReg(I.getOperand(Idx).getReg())
1380 .addImm((Idx - 1) * SrcSize);
1381
1382 DefReg = Tmp;
1383
Daniel Sandersf76f3152017-11-16 00:46:35 +00001384 if (!select(InsertInst, CoverageInfo))
Igor Breger0cddd342017-06-29 12:08:28 +00001385 return false;
1386 }
1387
1388 MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1389 TII.get(TargetOpcode::COPY), DstReg)
1390 .addReg(DefReg);
1391
Daniel Sandersf76f3152017-11-16 00:46:35 +00001392 if (!select(CopyInst, CoverageInfo))
Igor Breger0cddd342017-06-29 12:08:28 +00001393 return false;
1394
1395 I.eraseFromParent();
1396 return true;
1397}
Igor Breger685889c2017-08-21 10:51:54 +00001398
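// Lower G_BRCOND to TEST8ri of the condition register against 1 followed by a
// JNE_1 to the destination block.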
1399bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
1400 MachineRegisterInfo &MRI,
1401 MachineFunction &MF) const {
Igor Breger06335bb2017-09-17 14:02:19 +00001402 assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");
Igor Breger685889c2017-08-21 10:51:54 +00001403
1404 const unsigned CondReg = I.getOperand(0).getReg();
1405 MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
1406
1407 MachineInstr &TestInst =
1408 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
1409 .addReg(CondReg)
1410 .addImm(1);
1411 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JNE_1))
1412 .addMBB(DestMBB);
1413
1414 constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);
1415
1416 I.eraseFromParent();
1417 return true;
1418}
1419
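// Materialize a G_FCONSTANT by placing the value in the constant pool and
// loading it from there. With the small code model the load references the
// pool directly (RIP-relative on x86-64); with the large code model on x86-64
// the pool address is first moved into a 64-bit register. x86-32 PIC is not
// supported yet.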
Igor Breger21200ed2017-09-17 08:08:13 +00001420bool X86InstructionSelector::materializeFP(MachineInstr &I,
1421 MachineRegisterInfo &MRI,
1422 MachineFunction &MF) const {
Igor Breger06335bb2017-09-17 14:02:19 +00001423 assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
1424 "unexpected instruction");
Igor Breger21200ed2017-09-17 08:08:13 +00001425
1426 // Can't handle alternate code models yet.
1427 CodeModel::Model CM = TM.getCodeModel();
1428 if (CM != CodeModel::Small && CM != CodeModel::Large)
1429 return false;
1430
1431 const unsigned DstReg = I.getOperand(0).getReg();
1432 const LLT DstTy = MRI.getType(DstReg);
1433 const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
1434 unsigned Align = DstTy.getSizeInBits();
1435 const DebugLoc &DbgLoc = I.getDebugLoc();
1436
1437 unsigned Opc = getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Align);
1438
1439 // Create the load from the constant pool.
1440 const ConstantFP *CFP = I.getOperand(1).getFPImm();
1441 unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Align);
1442 MachineInstr *LoadInst = nullptr;
1443 unsigned char OpFlag = STI.classifyLocalReference(nullptr);
1444
1445 if (CM == CodeModel::Large && STI.is64Bit()) {
1446    // Under the X86-64 non-small code models, global values (and friends) are
1447    // 64 bits wide, so they cannot be folded into immediate fields.
1448
1449 unsigned AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
1450 BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
1451 .addConstantPoolIndex(CPI, 0, OpFlag);
1452
1453 MachineMemOperand *MMO = MF.getMachineMemOperand(
1454 MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
1455 MF.getDataLayout().getPointerSize(), Align);
1456
1457 LoadInst =
1458 addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
1459 AddrReg)
1460 .addMemOperand(MMO);
1461
Igor Breger06335bb2017-09-17 14:02:19 +00001462 } else if (CM == CodeModel::Small || !STI.is64Bit()) {
Igor Breger21200ed2017-09-17 08:08:13 +00001463    // Handle the case when globals fit in our immediate field: this is always
1464    // true for X86-32, and for X86-64 when in -mcmodel=small mode.
1465
1466 // x86-32 PIC requires a PIC base register for constant pools.
1467 unsigned PICBase = 0;
1468 if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
1469 // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
1470      // In DAG ISel, the code that initializes it is generated by the CGBR pass.
1471      return false; // TODO: support this mode.
Igor Breger06335bb2017-09-17 14:02:19 +00001472 } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
Igor Breger21200ed2017-09-17 08:08:13 +00001473 PICBase = X86::RIP;
1474
1475 LoadInst = addConstantPoolReference(
1476 BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
1477 OpFlag);
1478 } else
1479 return false;
1480
1481 constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
1482 I.eraseFromParent();
1483 return true;
1484}
1485
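// G_IMPLICIT_DEF and G_PHI only need a register class on their destination;
// once it is constrained they map directly onto IMPLICIT_DEF and PHI.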
Igor Breger2661ae42017-09-04 09:06:45 +00001486bool X86InstructionSelector::selectImplicitDefOrPHI(
1487 MachineInstr &I, MachineRegisterInfo &MRI) const {
Igor Breger06335bb2017-09-17 14:02:19 +00001488 assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
1489 I.getOpcode() == TargetOpcode::G_PHI) &&
1490 "unexpected instruction");
Igor Breger47be5fb2017-08-24 07:06:27 +00001491
1492 unsigned DstReg = I.getOperand(0).getReg();
1493
1494 if (!MRI.getRegClassOrNull(DstReg)) {
1495 const LLT DstTy = MRI.getType(DstReg);
1496 const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);
1497
1498 if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001499 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
1500 << " operand\n");
Igor Breger47be5fb2017-08-24 07:06:27 +00001501 return false;
1502 }
1503 }
1504
Igor Breger2661ae42017-09-04 09:06:45 +00001505 if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1506 I.setDesc(TII.get(X86::IMPLICIT_DEF));
1507 else
1508 I.setDesc(TII.get(X86::PHI));
1509
Igor Breger47be5fb2017-08-24 07:06:27 +00001510 return true;
1511}
1512
Alexander Ivchenko0bd4d8c2018-03-14 11:23:57 +00001513// Currently GlobalISel TableGen generates patterns for shift-by-immediate and
1514// shift-by-1, but with an i8 shift count. In G_LSHR/G_ASHR/G_SHL, as in
1515// LLVM IR, both arguments have the same type, so for now only i8 shifts can
1516// use the auto-generated TableGen patterns.
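// For example, a variable 32-bit G_SHL is selected here as a COPY of the
// shift amount into CL followed by SHL32rCL.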
1517bool X86InstructionSelector::selectShift(MachineInstr &I,
1518 MachineRegisterInfo &MRI,
1519 MachineFunction &MF) const {
1521 assert((I.getOpcode() == TargetOpcode::G_SHL ||
1522 I.getOpcode() == TargetOpcode::G_ASHR ||
1523 I.getOpcode() == TargetOpcode::G_LSHR) &&
1524 "unexpected instruction");
1525
1526 unsigned DstReg = I.getOperand(0).getReg();
1527 const LLT DstTy = MRI.getType(DstReg);
1528 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
1529
1530 const static struct ShiftEntry {
1531 unsigned SizeInBits;
Alexander Ivchenko0bd4d8c2018-03-14 11:23:57 +00001532 unsigned OpLSHR;
1533 unsigned OpASHR;
1534 unsigned OpSHL;
1535 } OpTable[] = {
Matt Arsenault30989e42019-01-22 21:42:11 +00001536 {8, X86::SHR8rCL, X86::SAR8rCL, X86::SHL8rCL}, // i8
1537 {16, X86::SHR16rCL, X86::SAR16rCL, X86::SHL16rCL}, // i16
1538 {32, X86::SHR32rCL, X86::SAR32rCL, X86::SHL32rCL}, // i32
1539 {64, X86::SHR64rCL, X86::SAR64rCL, X86::SHL64rCL} // i64
Alexander Ivchenko0bd4d8c2018-03-14 11:23:57 +00001540 };
1541
1542 if (DstRB.getID() != X86::GPRRegBankID)
1543 return false;
1544
1545 auto ShiftEntryIt = std::find_if(
1546 std::begin(OpTable), std::end(OpTable), [DstTy](const ShiftEntry &El) {
1547 return El.SizeInBits == DstTy.getSizeInBits();
1548 });
1549 if (ShiftEntryIt == std::end(OpTable))
1550 return false;
1551
Alexander Ivchenko0bd4d8c2018-03-14 11:23:57 +00001552 unsigned Opcode = 0;
1553 switch (I.getOpcode()) {
1554 case TargetOpcode::G_SHL:
1555 Opcode = ShiftEntryIt->OpSHL;
1556 break;
1557 case TargetOpcode::G_ASHR:
1558 Opcode = ShiftEntryIt->OpASHR;
1559 break;
1560 case TargetOpcode::G_LSHR:
1561 Opcode = ShiftEntryIt->OpLSHR;
1562 break;
1563 default:
1564 return false;
1565 }
1566
1567 unsigned Op0Reg = I.getOperand(1).getReg();
1568 unsigned Op1Reg = I.getOperand(2).getReg();
1569
Matt Arsenault30989e42019-01-22 21:42:11 +00001570 assert(MRI.getType(Op1Reg).getSizeInBits() == 8);
Alexander Ivchenko0bd4d8c2018-03-14 11:23:57 +00001571
Matt Arsenault30989e42019-01-22 21:42:11 +00001572 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
1573 X86::CL)
1574 .addReg(Op1Reg);
Alexander Ivchenko0bd4d8c2018-03-14 11:23:57 +00001575
1576 MachineInstr &ShiftInst =
1577 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
1578 .addReg(Op0Reg);
1579
1580 constrainSelectedInstRegOperands(ShiftInst, TII, TRI, RBI);
1581 I.eraseFromParent();
1582 return true;
1583}
1584
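// Select G_SDIV/G_SREM/G_UDIV/G_UREM on the GPR bank by expanding to X86's
// fixed-register DIV/IDIV sequence, using the per-type table below.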
Alexander Ivchenko1aedf202018-10-08 13:40:34 +00001585bool X86InstructionSelector::selectDivRem(MachineInstr &I,
1586 MachineRegisterInfo &MRI,
1587 MachineFunction &MF) const {
1588 // The implementation of this function is taken from X86FastISel.
1589 assert((I.getOpcode() == TargetOpcode::G_SDIV ||
1590 I.getOpcode() == TargetOpcode::G_SREM ||
1591 I.getOpcode() == TargetOpcode::G_UDIV ||
1592 I.getOpcode() == TargetOpcode::G_UREM) &&
1593 "unexpected instruction");
Alexander Ivchenko86ef9ab2018-03-14 15:41:11 +00001594
1595 const unsigned DstReg = I.getOperand(0).getReg();
Alexander Ivchenko1aedf202018-10-08 13:40:34 +00001596 const unsigned Op1Reg = I.getOperand(1).getReg();
1597 const unsigned Op2Reg = I.getOperand(2).getReg();
Alexander Ivchenko86ef9ab2018-03-14 15:41:11 +00001598
1599 const LLT RegTy = MRI.getType(DstReg);
Alexander Ivchenko1aedf202018-10-08 13:40:34 +00001600 assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&
Alexander Ivchenko86ef9ab2018-03-14 15:41:11 +00001601 "Arguments and return value types must match");
1602
1603 const RegisterBank &RegRB = *RBI.getRegBank(DstReg, MRI, TRI);
Alexander Ivchenko1aedf202018-10-08 13:40:34 +00001604 if (RegRB.getID() != X86::GPRRegBankID)
1605 return false;
Alexander Ivchenko86ef9ab2018-03-14 15:41:11 +00001606
Alexander Ivchenko1aedf202018-10-08 13:40:34 +00001607 const static unsigned NumTypes = 4; // i8, i16, i32, i64
1608 const static unsigned NumOps = 4; // SDiv, SRem, UDiv, URem
1609 const static bool S = true; // IsSigned
1610 const static bool U = false; // !IsSigned
1611 const static unsigned Copy = TargetOpcode::COPY;
Alexander Ivchenko86ef9ab2018-03-14 15:41:11 +00001612 // For the X86 IDIV instruction, in most cases the dividend
1613 // (numerator) must be in a specific register pair highreg:lowreg,
1614 // producing the quotient in lowreg and the remainder in highreg.
1615 // For most data types, to set up the instruction, the dividend is
1616 // copied into lowreg, and lowreg is sign-extended into highreg. The
1617 // exception is i8, where the dividend is defined as a single register rather
1618 // than a register pair, and we therefore directly sign-extend the dividend
1619 // into lowreg, instead of copying, and ignore the highreg.
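  // For example, a 32-bit G_SDIV copies the dividend into EAX, sign-extends
  // it into EDX with CDQ, issues IDIV32r on the divisor, and reads the
  // quotient from EAX; G_SREM reads the remainder from EDX instead.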
Alexander Ivchenko1aedf202018-10-08 13:40:34 +00001620 const static struct DivRemEntry {
1621 // The following portion depends only on the data type.
Alexander Ivchenko86ef9ab2018-03-14 15:41:11 +00001622 unsigned SizeInBits;
Alexander Ivchenko1aedf202018-10-08 13:40:34 +00001623 unsigned LowInReg; // low part of the register pair
1624 unsigned HighInReg; // high part of the register pair
1625 // The following portion depends on both the data type and the operation.
1626 struct DivRemResult {
1627 unsigned OpDivRem; // The specific DIV/IDIV opcode to use.
1628 unsigned OpSignExtend; // Opcode for sign-extending lowreg into
1629 // highreg, or copying a zero into highreg.
1630 unsigned OpCopy; // Opcode for copying dividend into lowreg, or
1631 // zero/sign-extending into lowreg for i8.
1632 unsigned DivRemResultReg; // Register containing the desired result.
1633 bool IsOpSigned; // Whether to use signed or unsigned form.
1634 } ResultTable[NumOps];
1635 } OpTable[NumTypes] = {
1636 {8,
1637 X86::AX,
1638 0,
1639 {
1640 {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S}, // SDiv
1641 {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SRem
1642 {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U}, // UDiv
1643 {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U}, // URem
1644 }}, // i8
1645 {16,
1646 X86::AX,
1647 X86::DX,
1648 {
1649 {X86::IDIV16r, X86::CWD, Copy, X86::AX, S}, // SDiv
1650 {X86::IDIV16r, X86::CWD, Copy, X86::DX, S}, // SRem
1651 {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U}, // UDiv
1652 {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U}, // URem
1653 }}, // i16
1654 {32,
1655 X86::EAX,
1656 X86::EDX,
1657 {
1658 {X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S}, // SDiv
1659 {X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S}, // SRem
1660 {X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U}, // UDiv
1661 {X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U}, // URem
1662 }}, // i32
1663 {64,
1664 X86::RAX,
1665 X86::RDX,
1666 {
1667 {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S}, // SDiv
1668 {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S}, // SRem
1669 {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U}, // UDiv
1670 {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U}, // URem
1671 }}, // i64
Alexander Ivchenko86ef9ab2018-03-14 15:41:11 +00001672 };
1673
Alexander Ivchenko1aedf202018-10-08 13:40:34 +00001674 auto OpEntryIt = std::find_if(std::begin(OpTable), std::end(OpTable),
1675 [RegTy](const DivRemEntry &El) {
1676 return El.SizeInBits == RegTy.getSizeInBits();
1677 });
1678 if (OpEntryIt == std::end(OpTable))
Alexander Ivchenko86ef9ab2018-03-14 15:41:11 +00001679 return false;
1680
Alexander Ivchenko1aedf202018-10-08 13:40:34 +00001681 unsigned OpIndex;
1682 switch (I.getOpcode()) {
1683 default:
1684 llvm_unreachable("Unexpected div/rem opcode");
1685 case TargetOpcode::G_SDIV:
1686 OpIndex = 0;
1687 break;
1688 case TargetOpcode::G_SREM:
1689 OpIndex = 1;
1690 break;
1691 case TargetOpcode::G_UDIV:
1692 OpIndex = 2;
1693 break;
1694 case TargetOpcode::G_UREM:
1695 OpIndex = 3;
1696 break;
1697 }
Alexander Ivchenko86ef9ab2018-03-14 15:41:11 +00001698
Alexander Ivchenko1aedf202018-10-08 13:40:34 +00001699 const DivRemEntry &TypeEntry = *OpEntryIt;
1700 const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];
Alexander Ivchenko86ef9ab2018-03-14 15:41:11 +00001701
1702 const TargetRegisterClass *RegRC = getRegClass(RegTy, RegRB);
Alexander Ivchenko1aedf202018-10-08 13:40:34 +00001703 if (!RBI.constrainGenericRegister(Op1Reg, *RegRC, MRI) ||
1704 !RBI.constrainGenericRegister(Op2Reg, *RegRC, MRI) ||
Alexander Ivchenko86ef9ab2018-03-14 15:41:11 +00001705 !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001706 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
1707 << " operand\n");
Alexander Ivchenko86ef9ab2018-03-14 15:41:11 +00001708 return false;
1709 }
1710
Alexander Ivchenko1aedf202018-10-08 13:40:34 +00001711 // Move op1 into low-order input register.
1712 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpCopy),
1713 TypeEntry.LowInReg)
1714 .addReg(Op1Reg);
1715 // Zero-extend or sign-extend into high-order input register.
1716 if (OpEntry.OpSignExtend) {
1717 if (OpEntry.IsOpSigned)
1718 BuildMI(*I.getParent(), I, I.getDebugLoc(),
1719 TII.get(OpEntry.OpSignExtend));
1720 else {
1721 unsigned Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
1722 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),
1723 Zero32);
Alexander Ivchenko86ef9ab2018-03-14 15:41:11 +00001724
Alexander Ivchenko1aedf202018-10-08 13:40:34 +00001725 // Copy the zero into the appropriate sub/super/identical physical
1726 // register. Unfortunately the operations needed are not uniform enough
1727 // to fit neatly into the table above.
1728 if (RegTy.getSizeInBits() == 16) {
1729 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
1730 TypeEntry.HighInReg)
1731 .addReg(Zero32, 0, X86::sub_16bit);
1732 } else if (RegTy.getSizeInBits() == 32) {
1733 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
1734 TypeEntry.HighInReg)
1735 .addReg(Zero32);
1736 } else if (RegTy.getSizeInBits() == 64) {
1737 BuildMI(*I.getParent(), I, I.getDebugLoc(),
1738 TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
1739 .addImm(0)
1740 .addReg(Zero32)
1741 .addImm(X86::sub_32bit);
1742 }
1743 }
1744 }
1745 // Generate the DIV/IDIV instruction.
1746 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpDivRem))
1747 .addReg(Op2Reg);
1748  // For the i8 remainder, we can't reference AH directly, as we'll end
1749  // up with bogus copies like %r9b = COPY %ah. Reference AX
1750  // instead to prevent AH references in a REX-prefixed instruction.
1751 //
1752 // The current assumption of the fast register allocator is that isel
1753 // won't generate explicit references to the GR8_NOREX registers. If
1754 // the allocator and/or the backend get enhanced to be more robust in
1755 // that regard, this can be, and should be, removed.
1756  if ((I.getOpcode() == TargetOpcode::G_SREM ||
1757       I.getOpcode() == TargetOpcode::G_UREM) &&
1758 OpEntry.DivRemResultReg == X86::AH && STI.is64Bit()) {
1759 unsigned SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
1760 unsigned ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
1761 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)
1762 .addReg(X86::AX);
1763
1764 // Shift AX right by 8 bits instead of using AH.
1765 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SHR16ri),
1766 ResultSuperReg)
1767 .addReg(SourceSuperReg)
1768 .addImm(8);
1769
1770 // Now reference the 8-bit subreg of the result.
1771 BuildMI(*I.getParent(), I, I.getDebugLoc(),
1772 TII.get(TargetOpcode::SUBREG_TO_REG))
1773 .addDef(DstReg)
1774 .addImm(0)
1775 .addReg(ResultSuperReg)
1776 .addImm(X86::sub_8bit);
1777 } else {
1778 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
1779 DstReg)
1780 .addReg(OpEntry.DivRemResultReg);
1781 }
Alexander Ivchenko86ef9ab2018-03-14 15:41:11 +00001782 I.eraseFromParent();
1783 return true;
1784}
1785
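// Select G_INTRINSIC_W_SIDE_EFFECTS. Only llvm.trap is supported for now; it
// is lowered to the TRAP instruction.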
Alexander Ivchenko58a5d6f2018-08-31 11:05:13 +00001786bool X86InstructionSelector::selectIntrinsicWSideEffects(
1787 MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) const {
1789 assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
1790 "unexpected instruction");
1791
1792 if (I.getOperand(0).getIntrinsicID() != Intrinsic::trap)
1793 return false;
1794
1795 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TRAP));
1796
1797 I.eraseFromParent();
1798 return true;
1799}
1800
Daniel Sanders0b5293f2017-04-06 09:49:34 +00001801InstructionSelector *
Daniel Sanderse7b0d662017-04-21 15:59:56 +00001802llvm::createX86InstructionSelector(const X86TargetMachine &TM,
1803 X86Subtarget &Subtarget,
Daniel Sanders0b5293f2017-04-06 09:49:34 +00001804 X86RegisterBankInfo &RBI) {
Daniel Sanderse7b0d662017-04-21 15:59:56 +00001805 return new X86InstructionSelector(TM, Subtarget, RBI);
Daniel Sanders0b5293f2017-04-06 09:49:34 +00001806}