//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the X86-specific support for the FastISel class. Much
// of the target-specific code is generated by tablegen in the file
// X86GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86CallingConv.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

namespace {

class X86FastISel final : public FastISel {
  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

  /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
  /// floating-point ops.
  /// When SSE is available, use it for f32 operations.
  /// When SSE2 is available, use it for f64 operations.
  bool X86ScalarSSEf64;
  bool X86ScalarSSEf32;

public:
  explicit X86FastISel(FunctionLoweringInfo &funcInfo,
                       const TargetLibraryInfo *libInfo)
      : FastISel(funcInfo, libInfo) {
    Subtarget = &funcInfo.MF->getSubtarget<X86Subtarget>();
    X86ScalarSSEf64 = Subtarget->hasSSE2();
    X86ScalarSSEf32 = Subtarget->hasSSE1();
  }

  bool fastSelectInstruction(const Instruction *I) override;

  /// \brief The specified machine instr operand is a vreg, and that vreg is
  /// being provided by the specified load instruction. If possible, try to
  /// fold the load as an operand to the instruction, returning true on
  /// success.
  bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                           const LoadInst *LI) override;

  bool fastLowerArguments() override;
  bool fastLowerCall(CallLoweringInfo &CLI) override;
  bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;

#include "X86GenFastISel.inc"

private:
  bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT,
                          const DebugLoc &DL);

  bool X86FastEmitLoad(EVT VT, X86AddressMode &AM, MachineMemOperand *MMO,
                       unsigned &ResultReg, unsigned Alignment = 1);

  bool X86FastEmitStore(EVT VT, const Value *Val, X86AddressMode &AM,
                        MachineMemOperand *MMO = nullptr, bool Aligned = false);
  bool X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
                        X86AddressMode &AM,
                        MachineMemOperand *MMO = nullptr, bool Aligned = false);

  bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                         unsigned &ResultReg);

  bool X86SelectAddress(const Value *V, X86AddressMode &AM);
  bool X86SelectCallAddress(const Value *V, X86AddressMode &AM);

  bool X86SelectLoad(const Instruction *I);

  bool X86SelectStore(const Instruction *I);

  bool X86SelectRet(const Instruction *I);

  bool X86SelectCmp(const Instruction *I);

  bool X86SelectZExt(const Instruction *I);

  bool X86SelectBranch(const Instruction *I);

  bool X86SelectShift(const Instruction *I);

  bool X86SelectDivRem(const Instruction *I);

  bool X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I);

  bool X86FastEmitSSESelect(MVT RetVT, const Instruction *I);

  bool X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I);

  bool X86SelectSelect(const Instruction *I);

  bool X86SelectTrunc(const Instruction *I);

  bool X86SelectFPExtOrFPTrunc(const Instruction *I, unsigned Opc,
                               const TargetRegisterClass *RC);

  bool X86SelectFPExt(const Instruction *I);
  bool X86SelectFPTrunc(const Instruction *I);
  bool X86SelectSIToFP(const Instruction *I);

  const X86InstrInfo *getInstrInfo() const {
    return Subtarget->getInstrInfo();
  }
  const X86TargetMachine *getTargetMachine() const {
    return static_cast<const X86TargetMachine *>(&TM);
  }

  bool handleConstantAddresses(const Value *V, X86AddressMode &AM);

  unsigned X86MaterializeInt(const ConstantInt *CI, MVT VT);
  unsigned X86MaterializeFP(const ConstantFP *CFP, MVT VT);
  unsigned X86MaterializeGV(const GlobalValue *GV, MVT VT);
  unsigned fastMaterializeConstant(const Constant *C) override;

  unsigned fastMaterializeAlloca(const AllocaInst *C) override;

  unsigned fastMaterializeFloatZero(const ConstantFP *CF) override;

  /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
  /// computed in an SSE register, not on the X87 floating point stack.
  bool isScalarFPTypeInSSEReg(EVT VT) const {
    return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 needs SSE2
           (VT == MVT::f32 && X86ScalarSSEf32);   // f32 needs SSE1
  }

  bool isTypeLegal(Type *Ty, MVT &VT, bool AllowI1 = false);

  bool IsMemcpySmall(uint64_t Len);

  bool TryEmitSmallMemcpy(X86AddressMode DestAM,
                          X86AddressMode SrcAM, uint64_t Len);

  bool foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
                            const Value *Cond);

  const MachineInstrBuilder &addFullAddress(const MachineInstrBuilder &MIB,
                                            X86AddressMode &AM);
};

} // end anonymous namespace.

static std::pair<X86::CondCode, bool>
getX86ConditionCode(CmpInst::Predicate Predicate) {
  X86::CondCode CC = X86::COND_INVALID;
  bool NeedSwap = false;
  switch (Predicate) {
  default: break;
  // Floating-point Predicates
  case CmpInst::FCMP_UEQ: CC = X86::COND_E;       break;
  case CmpInst::FCMP_OLT: NeedSwap = true;        // fall-through
  case CmpInst::FCMP_OGT: CC = X86::COND_A;       break;
  case CmpInst::FCMP_OLE: NeedSwap = true;        // fall-through
  case CmpInst::FCMP_OGE: CC = X86::COND_AE;      break;
  case CmpInst::FCMP_UGT: NeedSwap = true;        // fall-through
  case CmpInst::FCMP_ULT: CC = X86::COND_B;       break;
  case CmpInst::FCMP_UGE: NeedSwap = true;        // fall-through
  case CmpInst::FCMP_ULE: CC = X86::COND_BE;      break;
  case CmpInst::FCMP_ONE: CC = X86::COND_NE;      break;
  case CmpInst::FCMP_UNO: CC = X86::COND_P;       break;
  case CmpInst::FCMP_ORD: CC = X86::COND_NP;      break;
  case CmpInst::FCMP_OEQ: // fall-through
  case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break;
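  // Note: FCMP_OEQ and FCMP_UNE intentionally map to COND_INVALID; neither
  // can be tested with a single flag, so callers expand them into two
  // setcc/branch instructions (see the SETFOpcTable logic in X86SelectCmp
  // below).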

  // Integer Predicates
  case CmpInst::ICMP_EQ:  CC = X86::COND_E;       break;
  case CmpInst::ICMP_NE:  CC = X86::COND_NE;      break;
  case CmpInst::ICMP_UGT: CC = X86::COND_A;       break;
  case CmpInst::ICMP_UGE: CC = X86::COND_AE;      break;
  case CmpInst::ICMP_ULT: CC = X86::COND_B;       break;
  case CmpInst::ICMP_ULE: CC = X86::COND_BE;      break;
  case CmpInst::ICMP_SGT: CC = X86::COND_G;       break;
  case CmpInst::ICMP_SGE: CC = X86::COND_GE;      break;
  case CmpInst::ICMP_SLT: CC = X86::COND_L;       break;
  case CmpInst::ICMP_SLE: CC = X86::COND_LE;      break;
  }

  return std::make_pair(CC, NeedSwap);
}

static std::pair<unsigned, bool>
getX86SSEConditionCode(CmpInst::Predicate Predicate) {
  unsigned CC;
  bool NeedSwap = false;

  // SSE Condition code mapping:
  //  0 - EQ
  //  1 - LT
  //  2 - LE
  //  3 - UNORD
  //  4 - NEQ
  //  5 - NLT
  //  6 - NLE
  //  7 - ORD
  switch (Predicate) {
  default: llvm_unreachable("Unexpected predicate");
  case CmpInst::FCMP_OEQ: CC = 0;          break;
  case CmpInst::FCMP_OGT: NeedSwap = true; // fall-through
  case CmpInst::FCMP_OLT: CC = 1;          break;
  case CmpInst::FCMP_OGE: NeedSwap = true; // fall-through
  case CmpInst::FCMP_OLE: CC = 2;          break;
  case CmpInst::FCMP_UNO: CC = 3;          break;
  case CmpInst::FCMP_UNE: CC = 4;          break;
  case CmpInst::FCMP_ULE: NeedSwap = true; // fall-through
  case CmpInst::FCMP_UGE: CC = 5;          break;
  case CmpInst::FCMP_ULT: NeedSwap = true; // fall-through
  case CmpInst::FCMP_UGT: CC = 6;          break;
  case CmpInst::FCMP_ORD: CC = 7;          break;
  case CmpInst::FCMP_UEQ:
  case CmpInst::FCMP_ONE: CC = 8;          break;
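  // Note: 8 is not in the SSE predicate table above; UEQ and ONE have no
  // single-compare SSE encoding, so callers must handle (or reject) this
  // value specially.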
  }

  return std::make_pair(CC, NeedSwap);
}

/// \brief Adds a complex addressing mode to the given machine instr builder.
/// Note, this will constrain the index register. If it's not possible to
/// constrain the given index register, then a new one will be created. The
/// IndexReg field of the addressing mode will be updated to match in this case.
const MachineInstrBuilder &
X86FastISel::addFullAddress(const MachineInstrBuilder &MIB,
                            X86AddressMode &AM) {
  // First constrain the index register. It needs to be a GR64_NOSP.
  AM.IndexReg = constrainOperandRegClass(MIB->getDesc(), AM.IndexReg,
                                         MIB->getNumOperands() +
                                         X86::AddrIndexReg);
  return ::addFullAddress(MIB, AM);
}

/// \brief Check if it is possible to fold the condition from the XALU intrinsic
/// into the user. The condition code will only be updated on success.
bool X86FastISel::foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
                                       const Value *Cond) {
  if (!isa<ExtractValueInst>(Cond))
    return false;

  const auto *EV = cast<ExtractValueInst>(Cond);
  if (!isa<IntrinsicInst>(EV->getAggregateOperand()))
    return false;

  const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand());
  MVT RetVT;
  const Function *Callee = II->getCalledFunction();
  Type *RetTy =
    cast<StructType>(Callee->getReturnType())->getTypeAtIndex(0U);
  if (!isTypeLegal(RetTy, RetVT))
    return false;

  if (RetVT != MVT::i32 && RetVT != MVT::i64)
    return false;

  X86::CondCode TmpCC;
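  // Signed overflow (and the full multiplies) set OF, so use COND_O;
  // unsigned add/sub overflow sets CF, so use COND_B.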
  switch (II->getIntrinsicID()) {
  default: return false;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow: TmpCC = X86::COND_O; break;
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::usub_with_overflow: TmpCC = X86::COND_B; break;
  }

  // Check if both instructions are in the same basic block.
  if (II->getParent() != I->getParent())
    return false;

  // Make sure nothing is in the way
  BasicBlock::const_iterator Start(I);
  BasicBlock::const_iterator End(II);
  for (auto Itr = std::prev(Start); Itr != End; --Itr) {
    // We only expect extractvalue instructions between the intrinsic and the
    // instruction to be selected.
    if (!isa<ExtractValueInst>(Itr))
      return false;

    // Check that the extractvalue operand comes from the intrinsic.
    const auto *EVI = cast<ExtractValueInst>(Itr);
    if (EVI->getAggregateOperand() != II)
      return false;
  }

  CC = TmpCC;
  return true;
}

bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
  EVT evt = TLI.getValueType(DL, Ty, /*HandleUnknown=*/true);
  if (evt == MVT::Other || !evt.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  VT = evt.getSimpleVT();
  // For now, require SSE/SSE2 for performing floating-point operations,
  // since x87 requires additional work.
  if (VT == MVT::f64 && !X86ScalarSSEf64)
    return false;
  if (VT == MVT::f32 && !X86ScalarSSEf32)
    return false;
  // Similarly, no f80 support yet.
  if (VT == MVT::f80)
    return false;
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
}

#include "X86GenCallingConv.inc"

/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address, which is either a pre-computed address or a GlobalAddress,
/// is described by AM. On success, return true and place the result in
/// ResultReg.
bool X86FastISel::X86FastEmitLoad(EVT VT, X86AddressMode &AM,
                                  MachineMemOperand *MMO, unsigned &ResultReg,
                                  unsigned Alignment) {
  bool HasSSE41 = Subtarget->hasSSE41();
  bool HasAVX = Subtarget->hasAVX();
  bool HasAVX2 = Subtarget->hasAVX2();
  bool IsNonTemporal = MMO && MMO->isNonTemporal();

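  // The switch below picks between plain, aligned, and non-temporal variants
  // of the load: the non-temporal forms (MOVNTDQA and friends) additionally
  // require proof of alignment and, for some types, a newer ISA extension.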
  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC = &X86::GR8RegClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC = &X86::GR16RegClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC = &X86::GR32RegClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC = &X86::GR64RegClass;
    break;
  case MVT::f32:
    if (X86ScalarSSEf32) {
      Opc = HasAVX ? X86::VMOVSSrm : X86::MOVSSrm;
      RC = &X86::FR32RegClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC = &X86::RFP32RegClass;
    }
    break;
  case MVT::f64:
    if (X86ScalarSSEf64) {
      Opc = HasAVX ? X86::VMOVSDrm : X86::MOVSDrm;
      RC = &X86::FR64RegClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC = &X86::RFP64RegClass;
    }
    break;
  case MVT::f80:
    // No f80 support yet.
    return false;
  case MVT::v4f32:
    if (IsNonTemporal && Alignment >= 16 && HasSSE41)
      Opc = HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
    else if (Alignment >= 16)
      Opc = HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm;
    else
      Opc = HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm;
    RC = &X86::VR128RegClass;
    break;
  case MVT::v2f64:
    if (IsNonTemporal && Alignment >= 16 && HasSSE41)
      Opc = HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
    else if (Alignment >= 16)
      Opc = HasAVX ? X86::VMOVAPDrm : X86::MOVAPDrm;
    else
      Opc = HasAVX ? X86::VMOVUPDrm : X86::MOVUPDrm;
    RC = &X86::VR128RegClass;
    break;
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v8i16:
  case MVT::v16i8:
    if (IsNonTemporal && Alignment >= 16)
      Opc = HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
    else if (Alignment >= 16)
      Opc = HasAVX ? X86::VMOVDQArm : X86::MOVDQArm;
    else
      Opc = HasAVX ? X86::VMOVDQUrm : X86::MOVDQUrm;
    RC = &X86::VR128RegClass;
    break;
  case MVT::v8f32:
    assert(HasAVX);
    if (IsNonTemporal && Alignment >= 32 && HasAVX2)
      Opc = X86::VMOVNTDQAYrm;
    else
      Opc = (Alignment >= 32) ? X86::VMOVAPSYrm : X86::VMOVUPSYrm;
    RC = &X86::VR256RegClass;
    break;
  case MVT::v4f64:
    assert(HasAVX);
    if (IsNonTemporal && Alignment >= 32 && HasAVX2)
      Opc = X86::VMOVNTDQAYrm;
    else
      Opc = (Alignment >= 32) ? X86::VMOVAPDYrm : X86::VMOVUPDYrm;
    RC = &X86::VR256RegClass;
    break;
  case MVT::v8i32:
  case MVT::v4i64:
  case MVT::v16i16:
  case MVT::v32i8:
    assert(HasAVX);
    if (IsNonTemporal && Alignment >= 32 && HasAVX2)
      Opc = X86::VMOVNTDQAYrm;
    else
      Opc = (Alignment >= 32) ? X86::VMOVDQAYrm : X86::VMOVDQUYrm;
    RC = &X86::VR256RegClass;
    break;
  case MVT::v16f32:
    assert(Subtarget->hasAVX512());
    if (IsNonTemporal && Alignment >= 64)
      Opc = X86::VMOVNTDQAZrm;
    else
      Opc = (Alignment >= 64) ? X86::VMOVAPSZrm : X86::VMOVUPSZrm;
    RC = &X86::VR512RegClass;
    break;
  case MVT::v8f64:
    assert(Subtarget->hasAVX512());
    if (IsNonTemporal && Alignment >= 64)
      Opc = X86::VMOVNTDQAZrm;
    else
      Opc = (Alignment >= 64) ? X86::VMOVAPDZrm : X86::VMOVUPDZrm;
    RC = &X86::VR512RegClass;
    break;
  case MVT::v8i64:
  case MVT::v16i32:
  case MVT::v32i16:
  case MVT::v64i8:
    assert(Subtarget->hasAVX512());
    // Note: There are a lot more choices based on type with AVX-512, but
    // there's really no advantage when the load isn't masked.
    if (IsNonTemporal && Alignment >= 64)
      Opc = X86::VMOVNTDQAZrm;
    else
      Opc = (Alignment >= 64) ? X86::VMOVDQA64Zrm : X86::VMOVDQU64Zrm;
    RC = &X86::VR512RegClass;
    break;
  }

  ResultReg = createResultReg(RC);
  MachineInstrBuilder MIB =
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
  addFullAddress(MIB, AM);
  if (MMO)
    MIB->addMemOperand(*FuncInfo.MF, MMO);
  return true;
}

/// X86FastEmitStore - Emit a machine instruction to store a value Val of
/// type VT. The address is either pre-computed, consisting of a base pointer
/// and a displacement offset, or a GlobalAddress, i.e. V. Return true if it
/// is possible.
bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
                                   X86AddressMode &AM,
                                   MachineMemOperand *MMO, bool Aligned) {
  bool HasSSE2 = Subtarget->hasSSE2();
  bool HasSSE4A = Subtarget->hasSSE4A();
  bool HasAVX = Subtarget->hasAVX();
  bool IsNonTemporal = MMO && MMO->isNonTemporal();

  // Get opcode and regclass of the output for the given store instruction.
  unsigned Opc = 0;
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f80: // No f80 support yet.
  default: return false;
  case MVT::i1: {
    // Mask out all but lowest bit.
    unsigned AndResult = createResultReg(&X86::GR8RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(X86::AND8ri), AndResult)
      .addReg(ValReg, getKillRegState(ValIsKill)).addImm(1);
    ValReg = AndResult;
  }
  // FALLTHROUGH, handling i1 as i8.
  case MVT::i8:  Opc = X86::MOV8mr;  break;
  case MVT::i16: Opc = X86::MOV16mr; break;
  case MVT::i32:
    Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTImr : X86::MOV32mr;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTI_64mr : X86::MOV64mr;
    break;
  case MVT::f32:
    if (X86ScalarSSEf32) {
      if (IsNonTemporal && HasSSE4A)
        Opc = X86::MOVNTSS;
      else
        Opc = HasAVX ? X86::VMOVSSmr : X86::MOVSSmr;
    } else
      Opc = X86::ST_Fp32m;
    break;
  case MVT::f64:
    if (X86ScalarSSEf64) {
      if (IsNonTemporal && HasSSE4A)
        Opc = X86::MOVNTSD;
      else
        Opc = HasAVX ? X86::VMOVSDmr : X86::MOVSDmr;
    } else
      Opc = X86::ST_Fp64m;
    break;
  case MVT::v4f32:
    if (Aligned) {
      if (IsNonTemporal)
        Opc = HasAVX ? X86::VMOVNTPSmr : X86::MOVNTPSmr;
      else
        Opc = HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr;
    } else
      Opc = HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr;
    break;
  case MVT::v2f64:
    if (Aligned) {
      if (IsNonTemporal)
        Opc = HasAVX ? X86::VMOVNTPDmr : X86::MOVNTPDmr;
      else
        Opc = HasAVX ? X86::VMOVAPDmr : X86::MOVAPDmr;
    } else
      Opc = HasAVX ? X86::VMOVUPDmr : X86::MOVUPDmr;
    break;
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v8i16:
  case MVT::v16i8:
    if (Aligned) {
      if (IsNonTemporal)
        Opc = HasAVX ? X86::VMOVNTDQmr : X86::MOVNTDQmr;
      else
        Opc = HasAVX ? X86::VMOVDQAmr : X86::MOVDQAmr;
    } else
      Opc = HasAVX ? X86::VMOVDQUmr : X86::MOVDQUmr;
    break;
  case MVT::v8f32:
    assert(HasAVX);
    if (Aligned)
      Opc = IsNonTemporal ? X86::VMOVNTPSYmr : X86::VMOVAPSYmr;
    else
      Opc = X86::VMOVUPSYmr;
    break;
  case MVT::v4f64:
    assert(HasAVX);
    if (Aligned) {
      Opc = IsNonTemporal ? X86::VMOVNTPDYmr : X86::VMOVAPDYmr;
    } else
      Opc = X86::VMOVUPDYmr;
    break;
  case MVT::v8i32:
  case MVT::v4i64:
  case MVT::v16i16:
  case MVT::v32i8:
    assert(HasAVX);
    if (Aligned)
      Opc = IsNonTemporal ? X86::VMOVNTDQYmr : X86::VMOVDQAYmr;
    else
      Opc = X86::VMOVDQUYmr;
    break;
  case MVT::v16f32:
    assert(Subtarget->hasAVX512());
    if (Aligned)
      Opc = IsNonTemporal ? X86::VMOVNTPSZmr : X86::VMOVAPSZmr;
    else
      Opc = X86::VMOVUPSZmr;
    break;
  case MVT::v8f64:
    assert(Subtarget->hasAVX512());
    if (Aligned) {
      Opc = IsNonTemporal ? X86::VMOVNTPDZmr : X86::VMOVAPDZmr;
    } else
      Opc = X86::VMOVUPDZmr;
    break;
  case MVT::v8i64:
  case MVT::v16i32:
  case MVT::v32i16:
  case MVT::v64i8:
    assert(Subtarget->hasAVX512());
    // Note: There are a lot more choices based on type with AVX-512, but
    // there's really no advantage when the store isn't masked.
    if (Aligned)
      Opc = IsNonTemporal ? X86::VMOVNTDQZmr : X86::VMOVDQA64Zmr;
    else
      Opc = X86::VMOVDQU64Zmr;
    break;
  }

  const MCInstrDesc &Desc = TII.get(Opc);
  // Some of the instructions in the previous switch use FR128 instead
  // of FR32 for ValReg. Make sure the register we feed the instruction
  // matches its register class constraints.
  // Note: copying from FR32 to FR128 is fine; they are the same registers
  // behind the scenes, which is why this never triggered any bugs before.
  ValReg = constrainOperandRegClass(Desc, ValReg, Desc.getNumOperands() - 1);
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, Desc);
  addFullAddress(MIB, AM).addReg(ValReg, getKillRegState(ValIsKill));
  if (MMO)
    MIB->addMemOperand(*FuncInfo.MF, MMO);

  return true;
}

bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
                                   X86AddressMode &AM,
                                   MachineMemOperand *MMO, bool Aligned) {
  // Handle 'null' like i32/i64 0.
  if (isa<ConstantPointerNull>(Val))
    Val = Constant::getNullValue(DL.getIntPtrType(Val->getContext()));

  // If this is a store of a simple constant, fold the constant into the store.
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    unsigned Opc = 0;
    bool Signed = true;
    switch (VT.getSimpleVT().SimpleTy) {
    default: break;
    case MVT::i1: Signed = false; // FALLTHROUGH to handle as i8.
    case MVT::i8:  Opc = X86::MOV8mi;  break;
    case MVT::i16: Opc = X86::MOV16mi; break;
    case MVT::i32: Opc = X86::MOV32mi; break;
    case MVT::i64:
      // Must be a 32-bit sign extended value.
      if (isInt<32>(CI->getSExtValue()))
        Opc = X86::MOV64mi32;
      break;
    }

    if (Opc) {
      MachineInstrBuilder MIB =
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
      addFullAddress(MIB, AM).addImm(Signed ? (uint64_t) CI->getSExtValue()
                                            : CI->getZExtValue());
      if (MMO)
        MIB->addMemOperand(*FuncInfo.MF, MMO);
      return true;
    }
  }

  unsigned ValReg = getRegForValue(Val);
  if (ValReg == 0)
    return false;

  bool ValKill = hasTrivialKill(Val);
  return X86FastEmitStore(VT, ValReg, ValKill, AM, MMO, Aligned);
}

/// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
/// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
/// ISD::SIGN_EXTEND).
bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
                                    unsigned Src, EVT SrcVT,
                                    unsigned &ResultReg) {
  unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                           Src, /*TODO: Kill=*/false);
  if (RR == 0)
    return false;

  ResultReg = RR;
  return true;
}

bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
  // Handle constant address.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // Can't handle alternate code models yet.
    if (TM.getCodeModel() != CodeModel::Small)
      return false;

    // Can't handle TLS yet.
    if (GV->isThreadLocal())
      return false;

    // RIP-relative addresses can't have additional register operands, so if
    // we've already folded stuff into the addressing mode, just force the
    // global value into its own register, which we can use as the basereg.
    if (!Subtarget->isPICStyleRIPRel() ||
        (AM.Base.Reg == 0 && AM.IndexReg == 0)) {
      // Okay, we've committed to selecting this global. Set up the address.
      AM.GV = GV;

      // Allow the subtarget to classify the global.
      unsigned char GVFlags = Subtarget->classifyGlobalReference(GV);

      // If this reference is relative to the pic base, set it now.
      if (isGlobalRelativeToPICBase(GVFlags)) {
        // FIXME: How do we know Base.Reg is free??
        AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
      }

      // Unless the ABI requires an extra load, return a direct reference to
      // the global.
      if (!isGlobalStubReference(GVFlags)) {
        if (Subtarget->isPICStyleRIPRel()) {
          // Use rip-relative addressing if we can. Above we verified that the
          // base and index registers are unused.
          assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
          AM.Base.Reg = X86::RIP;
        }
        AM.GVOpFlags = GVFlags;
        return true;
      }

      // Ok, we need to do a load from a stub. If we've already loaded from
      // this stub, reuse the loaded pointer, otherwise emit the load now.
      DenseMap<const Value *, unsigned>::iterator I = LocalValueMap.find(V);
      unsigned LoadReg;
      if (I != LocalValueMap.end() && I->second != 0) {
        LoadReg = I->second;
      } else {
        // Issue load from stub.
        unsigned Opc = 0;
        const TargetRegisterClass *RC = nullptr;
        X86AddressMode StubAM;
        StubAM.Base.Reg = AM.Base.Reg;
        StubAM.GV = GV;
        StubAM.GVOpFlags = GVFlags;

        // Prepare for inserting code in the local-value area.
        SavePoint SaveInsertPt = enterLocalValueArea();

        if (TLI.getPointerTy(DL) == MVT::i64) {
          Opc = X86::MOV64rm;
          RC = &X86::GR64RegClass;

          if (Subtarget->isPICStyleRIPRel())
            StubAM.Base.Reg = X86::RIP;
        } else {
          Opc = X86::MOV32rm;
          RC = &X86::GR32RegClass;
        }

        LoadReg = createResultReg(RC);
        MachineInstrBuilder LoadMI =
          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
                  LoadReg);
        addFullAddress(LoadMI, StubAM);

        // Ok, back to normal mode.
        leaveLocalValueArea(SaveInsertPt);

        // Prevent loading GV stub multiple times in same MBB.
        LocalValueMap[V] = LoadReg;
      }

      // Now construct the final address. Note that the Disp, Scale,
      // and Index values may already be set here.
      AM.Base.Reg = LoadReg;
      AM.GV = nullptr;
      return true;
    }
  }

  // If all else fails, try to materialize the value in a register.
  if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
    if (AM.Base.Reg == 0) {
      AM.Base.Reg = getRegForValue(V);
      return AM.Base.Reg != 0;
    }
    if (AM.IndexReg == 0) {
      assert(AM.Scale == 1 && "Scale with no index!");
      AM.IndexReg = getRegForValue(V);
      return AM.IndexReg != 0;
    }
  }

  return false;
}

/// X86SelectAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
  SmallVector<const Value *, 32> GEPs;
redo_gep:
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(V)) {
    // Don't walk into other basic blocks; it's possible we haven't
    // visited them yet, so the instructions may not yet be assigned
    // virtual registers.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(V)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(V->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
  default: break;
  case Instruction::BitCast:
    // Look past bitcasts.
    return X86SelectAddress(U->getOperand(0), AM);

  case Instruction::IntToPtr:
    // Look past no-op inttoptrs.
    if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
        TLI.getPointerTy(DL))
      return X86SelectAddress(U->getOperand(0), AM);
    break;

  case Instruction::PtrToInt:
    // Look past no-op ptrtoints.
    if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
      return X86SelectAddress(U->getOperand(0), AM);
    break;

  case Instruction::Alloca: {
    // Do static allocas.
    const AllocaInst *A = cast<AllocaInst>(V);
    DenseMap<const AllocaInst *, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(A);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = SI->second;
      return true;
    }
    break;
  }

  case Instruction::Add: {
    // Adds of constants are common and easy enough.
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
      // They have to fit in the 32-bit signed displacement field though.
      if (isInt<32>(Disp)) {
        AM.Disp = (uint32_t)Disp;
        return X86SelectAddress(U->getOperand(0), AM);
      }
    }
    break;
  }

  case Instruction::GetElementPtr: {
    X86AddressMode SavedAM = AM;

    // Pattern-match simple GEPs.
    uint64_t Disp = (int32_t)AM.Disp;
    unsigned IndexReg = AM.IndexReg;
    unsigned Scale = AM.Scale;
    gep_type_iterator GTI = gep_type_begin(U);
    // Iterate through the indices, folding what we can. Constants can be
    // folded, and one dynamic index can be handled, if the scale is supported.
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      const Value *Op = *i;
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = DL.getStructLayout(STy);
        Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
        continue;
      }
      // An array/variable index is always of the form i*S where S is the
      // constant scale size. See if we can push the scale into immediates.
      uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
      for (;;) {
        if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
          // Constant-offset addressing.
          Disp += CI->getSExtValue() * S;
          break;
        }
        if (canFoldAddIntoGEP(U, Op)) {
          // A compatible add with a constant operand. Fold the constant.
          ConstantInt *CI =
            cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
          Disp += CI->getSExtValue() * S;
          // Iterate on the other operand.
          Op = cast<AddOperator>(Op)->getOperand(0);
          continue;
        }
        if (IndexReg == 0 &&
            (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
            (S == 1 || S == 2 || S == 4 || S == 8)) {
          // Scaled-index addressing.
          Scale = S;
          IndexReg = getRegForGEPIndex(Op).first;
          if (IndexReg == 0)
            return false;
          break;
        }
        // Unsupported.
        goto unsupported_gep;
      }
    }

    // Check for displacement overflow.
    if (!isInt<32>(Disp))
      break;

    AM.IndexReg = IndexReg;
    AM.Scale = Scale;
    AM.Disp = (uint32_t)Disp;
    GEPs.push_back(V);

    if (const GetElementPtrInst *GEP =
          dyn_cast<GetElementPtrInst>(U->getOperand(0))) {
      // Ok, the GEP indices were covered by constant-offset and scaled-index
      // addressing. Update the address state and move on to examining the base.
      V = GEP;
      goto redo_gep;
    } else if (X86SelectAddress(U->getOperand(0), AM)) {
      return true;
    }

    // If we couldn't merge the gep value into this addr mode, revert back to
    // our address and just match the value instead of completely failing.
    AM = SavedAM;

    for (const Value *I : reverse(GEPs))
      if (handleConstantAddresses(I, AM))
        return true;

    return false;
  unsupported_gep:
    // Ok, the GEP indices weren't all covered.
    break;
  }
  }

  return handleConstantAddresses(V, AM);
}

/// X86SelectCallAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  const Instruction *I = dyn_cast<Instruction>(V);
  // Record if the value is defined in the same basic block.
  //
  // This information is crucial for knowing whether or not folding an
  // operand is valid.
  // Indeed, FastISel generates or reuses a virtual register for all
  // operands of all instructions it selects. Obviously, the definition and
  // its uses must use the same virtual register, otherwise the produced
  // code is incorrect.
  // Before instruction selection, FunctionLoweringInfo::set sets the virtual
  // registers for values that are live across basic blocks. This ensures
  // that the values are set consistently across basic blocks, even
  // if different instruction selection mechanisms are used (e.g., a mix of
  // SDISel and FastISel).
  // For values local to a basic block, the instruction selection process
  // generates these virtual registers with whatever method is appropriate
  // for its needs. In particular, FastISel and SDISel do not share the way
  // local virtual registers are set.
  // Therefore, it is impossible (or at least unsafe) to share values
  // between basic blocks unless they use the same instruction selection
  // method, which is not guaranteed for X86.
  // Moreover, things like hasOneUse could not be used accurately if we
  // allowed references to values across basic blocks even though they are
  // not live across basic blocks initially.
  bool InMBB = true;
  if (I) {
    Opcode = I->getOpcode();
    U = I;
    InMBB = I->getParent() == FuncInfo.MBB->getBasicBlock();
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();
    U = C;
  }

  switch (Opcode) {
  default: break;
  case Instruction::BitCast:
    // Look past bitcasts if its operand is in the same BB.
    if (InMBB)
      return X86SelectCallAddress(U->getOperand(0), AM);
    break;

  case Instruction::IntToPtr:
    // Look past no-op inttoptrs if its operand is in the same BB.
    if (InMBB &&
        TLI.getValueType(DL, U->getOperand(0)->getType()) ==
        TLI.getPointerTy(DL))
      return X86SelectCallAddress(U->getOperand(0), AM);
    break;

  case Instruction::PtrToInt:
    // Look past no-op ptrtoints if its operand is in the same BB.
    if (InMBB && TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
      return X86SelectCallAddress(U->getOperand(0), AM);
    break;
  }

  // Handle constant address.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // Can't handle alternate code models yet.
    if (TM.getCodeModel() != CodeModel::Small)
      return false;

    // RIP-relative addresses can't have additional register operands.
    if (Subtarget->isPICStyleRIPRel() &&
        (AM.Base.Reg != 0 || AM.IndexReg != 0))
      return false;

    // Can't handle DLL Import.
    if (GV->hasDLLImportStorageClass())
      return false;

    // Can't handle TLS.
    if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
      if (GVar->isThreadLocal())
        return false;

    // Okay, we've committed to selecting this global. Set up the basic address.
    AM.GV = GV;

    // No ABI requires an extra load for anything other than DLLImport, which
    // we rejected above. Return a direct reference to the global.
    if (Subtarget->isPICStyleRIPRel()) {
      // Use rip-relative addressing if we can. Above we verified that the
      // base and index registers are unused.
      assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
      AM.Base.Reg = X86::RIP;
    } else {
      AM.GVOpFlags = Subtarget->classifyLocalReference(nullptr);
    }

    return true;
  }

  // If all else fails, try to materialize the value in a register.
  if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
    if (AM.Base.Reg == 0) {
      AM.Base.Reg = getRegForValue(V);
      return AM.Base.Reg != 0;
    }
    if (AM.IndexReg == 0) {
      assert(AM.Scale == 1 && "Scale with no index!");
      AM.IndexReg = getRegForValue(V);
      return AM.IndexReg != 0;
    }
  }

  return false;
}

/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(const Instruction *I) {
  // Atomic stores need special handling.
  const StoreInst *S = cast<StoreInst>(I);

  if (S->isAtomic())
    return false;

  const Value *PtrV = I->getOperand(1);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  const Value *Val = S->getValueOperand();
  const Value *Ptr = S->getPointerOperand();

  MVT VT;
  if (!isTypeLegal(Val->getType(), VT, /*AllowI1=*/true))
    return false;

  unsigned Alignment = S->getAlignment();
  unsigned ABIAlignment = DL.getABITypeAlignment(Val->getType());
  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = ABIAlignment;
  bool Aligned = Alignment >= ABIAlignment;
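  // Treat the store as aligned only when the IR alignment is at least the
  // type's natural ABI alignment; X86FastEmitStore uses this flag to choose
  // between the aligned (e.g. MOVAPS/MOVDQA) and unaligned
  // (e.g. MOVUPS/MOVDQU) opcodes.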

  X86AddressMode AM;
  if (!X86SelectAddress(Ptr, AM))
    return false;

  return X86FastEmitStore(VT, Val, AM, createMachineMemOperandFor(I), Aligned);
}

/// X86SelectRet - Select and emit code to implement ret instructions.
bool X86FastISel::X86SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();
  const X86MachineFunctionInfo *X86MFInfo =
      FuncInfo.MF->getInfo<X86MachineFunctionInfo>();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (TLI.supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return false;

  if (TLI.supportSplitCSR(FuncInfo.MF))
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (CC != CallingConv::C &&
      CC != CallingConv::Fast &&
      CC != CallingConv::X86_FastCall &&
      CC != CallingConv::X86_64_SysV)
    return false;

  if (Subtarget->isCallingConvWin64(CC))
    return false;

  // Don't handle popping bytes on return for now.
  if (X86MFInfo->getBytesToPopOnReturn() != 0)
    return false;

  // fastcc with -tailcallopt is intended to provide a guaranteed
  // tail call optimization. Fastisel doesn't know how to do that.
  if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)
    return false;

  // Let SDISel handle vararg functions.
  if (F.isVarArg())
    return false;

  // Build a list of return value registers.
  SmallVector<unsigned, 4> RetRegs;

  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, RetCC_X86);

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    // The calling-convention tables for x87 returns don't tell
    // the whole story.
    if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT SrcVT = TLI.getValueType(DL, RV->getType());
    EVT DstVT = VA.getValVT();
    // Special handling for extended integers.
    if (SrcVT != DstVT) {
      if (SrcVT != MVT::i1 && SrcVT != MVT::i8 && SrcVT != MVT::i16)
        return false;

      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
        return false;

      assert(DstVT == MVT::i32 && "X86 should always ext to i32");

      if (SrcVT == MVT::i1) {
        if (Outs[0].Flags.isSExt())
          return false;
        SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg, /*TODO: Kill=*/false);
        SrcVT = MVT::i8;
      }
      unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
                                             ISD::SIGN_EXTEND;
      SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op,
                          SrcReg, /*TODO: Kill=*/false);
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);

    // Add register to return instruction.
    RetRegs.push_back(VA.getLocReg());
  }

  // Swift calling convention does not require we copy the sret argument
  // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.

  // All x86 ABIs require that for returning structs by value we copy
  // the sret argument into %rax/%eax (depending on ABI) for the return.
  // We saved the argument into a virtual register in the entry block,
  // so now we copy the value out and into %rax/%eax.
  if (F.hasStructRetAttr() && CC != CallingConv::Swift) {
    unsigned Reg = X86MFInfo->getSRetReturnReg();
    assert(Reg &&
           "SRetReturnReg should have been set in LowerFormalArguments()!");
    unsigned RetReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), RetReg).addReg(Reg);
    RetRegs.push_back(RetReg);
  }

  // Now emit the RET.
  MachineInstrBuilder MIB =
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
  for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
    MIB.addReg(RetRegs[i], RegState::Implicit);
  return true;
}

/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(const Instruction *I) {
  const LoadInst *LI = cast<LoadInst>(I);

  // Atomic loads need special handling.
  if (LI->isAtomic())
    return false;

  const Value *SV = I->getOperand(0);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  MVT VT;
  if (!isTypeLegal(LI->getType(), VT, /*AllowI1=*/true))
    return false;

  const Value *Ptr = LI->getPointerOperand();

  X86AddressMode AM;
  if (!X86SelectAddress(Ptr, AM))
    return false;

  unsigned Alignment = LI->getAlignment();
  unsigned ABIAlignment = DL.getABITypeAlignment(LI->getType());
  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = ABIAlignment;

  unsigned ResultReg = 0;
  if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg,
                       Alignment))
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
  bool HasAVX = Subtarget->hasAVX();
  bool X86ScalarSSEf32 = Subtarget->hasSSE1();
  bool X86ScalarSSEf64 = Subtarget->hasSSE2();

  switch (VT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i8:  return X86::CMP8rr;
  case MVT::i16: return X86::CMP16rr;
  case MVT::i32: return X86::CMP32rr;
  case MVT::i64: return X86::CMP64rr;
  case MVT::f32:
    return X86ScalarSSEf32 ? (HasAVX ? X86::VUCOMISSrr : X86::UCOMISSrr) : 0;
  case MVT::f64:
    return X86ScalarSSEf64 ? (HasAVX ? X86::VUCOMISDrr : X86::UCOMISDrr) : 0;
  }
}

/// If the RHS of the comparison is a constant that we can fold, return an
/// opcode that works for the compare (e.g. CMP32ri); otherwise return 0.
static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
  int64_t Val = RHSC->getSExtValue();
  switch (VT.getSimpleVT().SimpleTy) {
  // Otherwise, we can't fold the immediate into this comparison.
  default:
    return 0;
  case MVT::i8:
    return X86::CMP8ri;
  case MVT::i16:
    if (isInt<8>(Val))
      return X86::CMP16ri8;
    return X86::CMP16ri;
  case MVT::i32:
    if (isInt<8>(Val))
      return X86::CMP32ri8;
    return X86::CMP32ri;
  case MVT::i64:
    if (isInt<8>(Val))
      return X86::CMP64ri8;
    // 64-bit comparisons are only valid if the immediate fits in a 32-bit sext
    // field.
    if (isInt<32>(Val))
      return X86::CMP64ri32;
    return 0;
  }
}

bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT,
                                     const DebugLoc &CurDbgLoc) {
  unsigned Op0Reg = getRegForValue(Op0);
  if (Op0Reg == 0) return false;

  // Handle 'null' like i32/i64 0.
  if (isa<ConstantPointerNull>(Op1))
    Op1 = Constant::getNullValue(DL.getIntPtrType(Op0->getContext()));

  // We have two options: compare with register or immediate. If the RHS of
  // the compare is an immediate that we can fold into this compare, use
  // CMPri, otherwise use CMPrr.
  if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc,
              TII.get(CompareImmOpc))
        .addReg(Op0Reg)
        .addImm(Op1C->getSExtValue());
      return true;
    }
  }

  unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget);
  if (CompareOpc == 0) return false;

  unsigned Op1Reg = getRegForValue(Op1);
  if (Op1Reg == 0) return false;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareOpc))
    .addReg(Op0Reg)
    .addReg(Op1Reg);

  return true;
}

bool X86FastISel::X86SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  MVT VT;
  if (!isTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  if (I->getType()->isIntegerTy(1) && Subtarget->hasAVX512())
    return false;

  // Try to optimize or fold the cmp.
  CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
  unsigned ResultReg = 0;
  switch (Predicate) {
  default: break;
  case CmpInst::FCMP_FALSE: {
    ResultReg = createResultReg(&X86::GR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0),
            ResultReg);
    ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true,
                                           X86::sub_8bit);
    if (!ResultReg)
      return false;
    break;
  }
  case CmpInst::FCMP_TRUE: {
    ResultReg = createResultReg(&X86::GR8RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),
            ResultReg).addImm(1);
    break;
  }
  }

  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  const Value *LHS = CI->getOperand(0);
  const Value *RHS = CI->getOperand(1);

  // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
  // We don't have to materialize a zero constant for this case and can just use
  // %x again on the RHS.
  if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
    const auto *RHSC = dyn_cast<ConstantFP>(RHS);
    if (RHSC && RHSC->isNullValue())
      RHS = LHS;
  }

  // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
  static unsigned SETFOpcTable[2][3] = {
    { X86::SETEr,  X86::SETNPr, X86::AND8rr },
    { X86::SETNEr, X86::SETPr,  X86::OR8rr  }
  };
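  // UCOMISS/UCOMISD set PF on an unordered result: OEQ is "equal and ordered"
  // (SETE AND SETNP), while UNE is "not equal or unordered" (SETNE OR SETP).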
1455 unsigned *SETFOpc = nullptr;
1456 switch (Predicate) {
1457 default: break;
1458 case CmpInst::FCMP_OEQ: SETFOpc = &SETFOpcTable[0][0]; break;
1459 case CmpInst::FCMP_UNE: SETFOpc = &SETFOpcTable[1][0]; break;
1460 }
1461
1462 ResultReg = createResultReg(&X86::GR8RegClass);
1463 if (SETFOpc) {
1464 if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
1465 return false;
1466
1467 unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
1468 unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);
1469 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[0]),
1470 FlagReg1);
1471 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[1]),
1472 FlagReg2);
1473 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[2]),
1474 ResultReg).addReg(FlagReg1).addReg(FlagReg2);
1475 updateValueMap(I, ResultReg);
1476 return true;
1477 }
1478
1479 X86::CondCode CC;
1480 bool SwapArgs;
1481 std::tie(CC, SwapArgs) = getX86ConditionCode(Predicate);
1482 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
1483 unsigned Opc = X86::getSETFromCond(CC);
1484
1485 if (SwapArgs)
1486 std::swap(LHS, RHS);
1487
1488 // Emit a compare of LHS/RHS.
1489 if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
1490 return false;
1491
1492 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
1493 updateValueMap(I, ResultReg);
1494 return true;
1495}

bool X86FastISel::X86SelectZExt(const Instruction *I) {
  EVT DstVT = TLI.getValueType(DL, I->getType());
  if (!TLI.isTypeLegal(DstVT))
    return false;

  unsigned ResultReg = getRegForValue(I->getOperand(0));
  if (ResultReg == 0)
    return false;

  // Handle zero-extension from i1 to i8, which is common.
  MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
  if (SrcVT.SimpleTy == MVT::i1) {
    // Set the high bits to zero.
    ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false);
    SrcVT = MVT::i8;

    if (ResultReg == 0)
      return false;
  }

  if (DstVT == MVT::i64) {
    // Handle extension to 64-bits via sub-register shenanigans.
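    // For example, "zext i8 %x to i64" is emitted roughly as (a sketch):
    //   movzbl %al, %ecx                   ; MOVZX32rr8 clears bits 8-31
    //   SUBREG_TO_REG 0, %ecx, sub_32bit   ; emits no code; bits 32-63 are
    //                                      ; already zero because 32-bit defs
    //                                      ; zero the upper half on x86-64.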
    unsigned MovInst;

    switch (SrcVT.SimpleTy) {
    case MVT::i8:  MovInst = X86::MOVZX32rr8;  break;
    case MVT::i16: MovInst = X86::MOVZX32rr16; break;
    case MVT::i32: MovInst = X86::MOV32rr;     break;
    default: llvm_unreachable("Unexpected zext to i64 source type");
    }

    unsigned Result32 = createResultReg(&X86::GR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovInst), Result32)
      .addReg(ResultReg);

    ResultReg = createResultReg(&X86::GR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::SUBREG_TO_REG),
            ResultReg)
      .addImm(0).addReg(Result32).addImm(X86::sub_32bit);
  } else if (DstVT != MVT::i8) {
    ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
                           ResultReg, /*Kill=*/true);
    if (ResultReg == 0)
      return false;
  }

  updateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectBranch(const Instruction *I) {
  // Unconditional branches are selected by tablegen-generated code.
  // Handle a conditional branch.
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TrueMBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FalseMBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Fold the common case of a conditional branch with a comparison
  // in the same block (values defined on other blocks may not have
  // initialized registers).
  X86::CondCode CC;
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && CI->getParent() == I->getParent()) {
      EVT VT = TLI.getValueType(DL, CI->getOperand(0)->getType());

      // Try to optimize or fold the cmp.
      CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
      switch (Predicate) {
      default: break;
      case CmpInst::FCMP_FALSE: fastEmitBranch(FalseMBB, DbgLoc); return true;
      case CmpInst::FCMP_TRUE:  fastEmitBranch(TrueMBB, DbgLoc); return true;
      }

      const Value *CmpLHS = CI->getOperand(0);
      const Value *CmpRHS = CI->getOperand(1);

      // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord
      // %x, 0.0.
      // We don't have to materialize a zero constant for this case and can
      // just use %x again on the RHS.
      if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
        const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
        if (CmpRHSC && CmpRHSC->isNullValue())
          CmpRHS = CmpLHS;
      }

      // Try to take advantage of fallthrough opportunities.
      if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
        std::swap(TrueMBB, FalseMBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      // FCMP_OEQ and FCMP_UNE cannot be expressed with a single flag/condition
      // code check. Instead two branch instructions are required to check all
      // the flags. First we change the predicate to a supported condition code,
      // which will be the first branch. Later on we will emit the second
      // branch.
      bool NeedExtraBranch = false;
      switch (Predicate) {
      default: break;
      case CmpInst::FCMP_OEQ:
        std::swap(TrueMBB, FalseMBB); // fall-through
      case CmpInst::FCMP_UNE:
        NeedExtraBranch = true;
        Predicate = CmpInst::FCMP_ONE;
        break;
      }

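      // A sketch of the sequence finally emitted for "br (fcmp une %a, %b)":
      //   ucomiss %xmm1, %xmm0
      //   jne .LBB_true       ; first branch, from the FCMP_ONE predicate
      //   jp  .LBB_true       ; extra branch: unordered also satisfies une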
      bool SwapArgs;
      unsigned BranchOpc;
      std::tie(CC, SwapArgs) = getX86ConditionCode(Predicate);
      assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");

      BranchOpc = X86::GetCondBranchFromCond(CC);
      if (SwapArgs)
        std::swap(CmpLHS, CmpRHS);

      // Emit a compare of the LHS and RHS, setting the flags.
      if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT, CI->getDebugLoc()))
        return false;

      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BranchOpc))
        .addMBB(TrueMBB);

      // X86 requires a second branch to handle UNE (and OEQ, which is mapped
      // to UNE above).
      if (NeedExtraBranch) {
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JP_1))
          .addMBB(TrueMBB);
      }

      finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    // Handle things like "%cond = trunc i32 %X to i1 / br i1 %cond", which
    // typically happen for _Bool and C++ bools.
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        isTypeLegal(TI->getOperand(0)->getType(), SourceVT)) {
      unsigned TestOpc = 0;
      switch (SourceVT.SimpleTy) {
      default: break;
      case MVT::i8:  TestOpc = X86::TEST8ri; break;
      case MVT::i16: TestOpc = X86::TEST16ri; break;
      case MVT::i32: TestOpc = X86::TEST32ri; break;
      case MVT::i64: TestOpc = X86::TEST64ri32; break;
      }
      if (TestOpc) {
        unsigned OpReg = getRegForValue(TI->getOperand(0));
        if (OpReg == 0) return false;
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TestOpc))
          .addReg(OpReg).addImm(1);

        unsigned JmpOpc = X86::JNE_1;
        if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
          std::swap(TrueMBB, FalseMBB);
          JmpOpc = X86::JE_1;
        }

        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(JmpOpc))
          .addMBB(TrueMBB);

        finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
        return true;
      }
    }
  } else if (foldX86XALUIntrinsic(CC, BI, BI->getCondition())) {
    // Fake request the condition, otherwise the intrinsic might be completely
    // optimized away.
    unsigned TmpReg = getRegForValue(BI->getCondition());
    if (TmpReg == 0)
      return false;

    unsigned BranchOpc = X86::GetCondBranchFromCond(CC);

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BranchOpc))
      .addMBB(TrueMBB);
    finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
    return true;
  }

  // Otherwise do a clumsy setcc and re-test it.
  // Note that i1 essentially gets ANY_EXTEND'ed to i8 where it isn't used
  // in an explicit cast, so make sure to handle that correctly.
  unsigned OpReg = getRegForValue(BI->getCondition());
  if (OpReg == 0) return false;

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
    .addReg(OpReg).addImm(1);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JNE_1))
    .addMBB(TrueMBB);
  finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
  return true;
}

bool X86FastISel::X86SelectShift(const Instruction *I) {
  unsigned CReg = 0, OpReg = 0;
  const TargetRegisterClass *RC = nullptr;
  if (I->getType()->isIntegerTy(8)) {
    CReg = X86::CL;
    RC = &X86::GR8RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR8rCL; break;
    case Instruction::AShr: OpReg = X86::SAR8rCL; break;
    case Instruction::Shl:  OpReg = X86::SHL8rCL; break;
    default: return false;
    }
  } else if (I->getType()->isIntegerTy(16)) {
    CReg = X86::CX;
    RC = &X86::GR16RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR16rCL; break;
    case Instruction::AShr: OpReg = X86::SAR16rCL; break;
    case Instruction::Shl:  OpReg = X86::SHL16rCL; break;
    default: return false;
    }
  } else if (I->getType()->isIntegerTy(32)) {
    CReg = X86::ECX;
    RC = &X86::GR32RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR32rCL; break;
    case Instruction::AShr: OpReg = X86::SAR32rCL; break;
    case Instruction::Shl:  OpReg = X86::SHL32rCL; break;
    default: return false;
    }
  } else if (I->getType()->isIntegerTy(64)) {
    CReg = X86::RCX;
    RC = &X86::GR64RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR64rCL; break;
    case Instruction::AShr: OpReg = X86::SAR64rCL; break;
    case Instruction::Shl:  OpReg = X86::SHL64rCL; break;
    default: return false;
    }
  } else {
    return false;
  }

  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;

  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
          CReg).addReg(Op1Reg);

  // The shift instruction uses X86::CL. If we defined a super-register
  // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
  if (CReg != X86::CL)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::KILL), X86::CL)
      .addReg(CReg, RegState::Kill);
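
  // The net effect for "shl i32 %a, %b" is roughly (a sketch, with virtual
  // registers shown as physical ones):
  //   movl %ebx, %ecx     ; copy the shift amount into ECX
  //   KILL %cl            ; bookkeeping only, emits no machine code
  //   shll %cl, %eax      ; the shift itself reads CL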

  unsigned ResultReg = createResultReg(RC);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(OpReg), ResultReg)
    .addReg(Op0Reg);
  updateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectDivRem(const Instruction *I) {
  const static unsigned NumTypes = 4; // i8, i16, i32, i64
  const static unsigned NumOps = 4;   // SDiv, SRem, UDiv, URem
  const static bool S = true;         // IsSigned
  const static bool U = false;        // !IsSigned
  const static unsigned Copy = TargetOpcode::COPY;
  // For the X86 DIV/IDIV instruction, in most cases the dividend
  // (numerator) must be in a specific register pair highreg:lowreg,
  // producing the quotient in lowreg and the remainder in highreg.
  // For most data types, to set up the instruction, the dividend is
  // copied into lowreg, and lowreg is sign-extended or zero-extended
  // into highreg. The exception is i8, where the dividend is defined
  // as a single register rather than a register pair, and we
  // therefore directly sign-extend or zero-extend the dividend into
  // lowreg, instead of copying, and ignore the highreg.
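  //
  // For example, "sdiv i32 %a, %b" is set up and emitted roughly as
  // (a sketch):
  //   movl %a, %eax     ; dividend into lowreg
  //   cdq               ; sign-extend EAX into EDX, the highreg
  //   idivl %b          ; quotient in EAX, remainder in EDX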
  const static struct DivRemEntry {
    // The following portion depends only on the data type.
    const TargetRegisterClass *RC;
    unsigned LowInReg;  // low part of the register pair
    unsigned HighInReg; // high part of the register pair
    // The following portion depends on both the data type and the operation.
    struct DivRemResult {
      unsigned OpDivRem;        // The specific DIV/IDIV opcode to use.
      unsigned OpSignExtend;    // Opcode for sign-extending lowreg into
                                // highreg, or copying a zero into highreg.
      unsigned OpCopy;          // Opcode for copying dividend into lowreg, or
                                // zero/sign-extending into lowreg for i8.
      unsigned DivRemResultReg; // Register containing the desired result.
      bool IsOpSigned;          // Whether to use signed or unsigned form.
    } ResultTable[NumOps];
  } OpTable[NumTypes] = {
    { &X86::GR8RegClass, X86::AX, 0, {
        { X86::IDIV8r,  0,            X86::MOVSX16rr8, X86::AL,  S }, // SDiv
        { X86::IDIV8r,  0,            X86::MOVSX16rr8, X86::AH,  S }, // SRem
        { X86::DIV8r,   0,            X86::MOVZX16rr8, X86::AL,  U }, // UDiv
        { X86::DIV8r,   0,            X86::MOVZX16rr8, X86::AH,  U }, // URem
      }
    }, // i8
    { &X86::GR16RegClass, X86::AX, X86::DX, {
        { X86::IDIV16r, X86::CWD,     Copy,            X86::AX,  S }, // SDiv
        { X86::IDIV16r, X86::CWD,     Copy,            X86::DX,  S }, // SRem
        { X86::DIV16r,  X86::MOV32r0, Copy,            X86::AX,  U }, // UDiv
        { X86::DIV16r,  X86::MOV32r0, Copy,            X86::DX,  U }, // URem
      }
    }, // i16
    { &X86::GR32RegClass, X86::EAX, X86::EDX, {
        { X86::IDIV32r, X86::CDQ,     Copy,            X86::EAX, S }, // SDiv
        { X86::IDIV32r, X86::CDQ,     Copy,            X86::EDX, S }, // SRem
        { X86::DIV32r,  X86::MOV32r0, Copy,            X86::EAX, U }, // UDiv
        { X86::DIV32r,  X86::MOV32r0, Copy,            X86::EDX, U }, // URem
      }
    }, // i32
    { &X86::GR64RegClass, X86::RAX, X86::RDX, {
        { X86::IDIV64r, X86::CQO,     Copy,            X86::RAX, S }, // SDiv
        { X86::IDIV64r, X86::CQO,     Copy,            X86::RDX, S }, // SRem
        { X86::DIV64r,  X86::MOV32r0, Copy,            X86::RAX, U }, // UDiv
        { X86::DIV64r,  X86::MOV32r0, Copy,            X86::RDX, U }, // URem
      }
    }, // i64
  };

  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  unsigned TypeIndex, OpIndex;
  switch (VT.SimpleTy) {
  default: return false;
  case MVT::i8:  TypeIndex = 0; break;
  case MVT::i16: TypeIndex = 1; break;
  case MVT::i32: TypeIndex = 2; break;
  case MVT::i64: TypeIndex = 3;
    if (!Subtarget->is64Bit())
      return false;
    break;
  }

  switch (I->getOpcode()) {
  default: llvm_unreachable("Unexpected div/rem opcode");
  case Instruction::SDiv: OpIndex = 0; break;
  case Instruction::SRem: OpIndex = 1; break;
  case Instruction::UDiv: OpIndex = 2; break;
  case Instruction::URem: OpIndex = 3; break;
  }

  const DivRemEntry &TypeEntry = OpTable[TypeIndex];
  const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];
  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0)
    return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0)
    return false;

  // Move op0 into low-order input register.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg);
  // Zero-extend or sign-extend into high-order input register.
  if (OpEntry.OpSignExtend) {
    if (OpEntry.IsOpSigned)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(OpEntry.OpSignExtend));
    else {
      unsigned Zero32 = createResultReg(&X86::GR32RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(X86::MOV32r0), Zero32);

      // Copy the zero into the appropriate sub/super/identical physical
      // register. Unfortunately the operations needed are not uniform enough
      // to fit neatly into the table above.
      if (VT.SimpleTy == MVT::i16) {
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(Copy), TypeEntry.HighInReg)
          .addReg(Zero32, 0, X86::sub_16bit);
      } else if (VT.SimpleTy == MVT::i32) {
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(Copy), TypeEntry.HighInReg)
          .addReg(Zero32);
      } else if (VT.SimpleTy == MVT::i64) {
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
          .addImm(0).addReg(Zero32).addImm(X86::sub_32bit);
      }
    }
  }
  // Generate the DIV/IDIV instruction.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);
  // For i8 remainder, we can't reference AH directly, as we'll end
  // up with bogus copies like %R9B = COPY %AH. Reference AX
  // instead to prevent AH references in a REX instruction.
  //
  // The current assumption of the fast register allocator is that isel
  // won't generate explicit references to the GPR8_NOREX registers. If
  // the allocator and/or the backend get enhanced to be more robust in
  // that regard, this can be, and should be, removed.
  unsigned ResultReg = 0;
  if ((I->getOpcode() == Instruction::SRem ||
       I->getOpcode() == Instruction::URem) &&
      OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {
    unsigned SourceSuperReg = createResultReg(&X86::GR16RegClass);
    unsigned ResultSuperReg = createResultReg(&X86::GR16RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(Copy), SourceSuperReg).addReg(X86::AX);

    // Shift AX right by 8 bits instead of using AH.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SHR16ri),
            ResultSuperReg).addReg(SourceSuperReg).addImm(8);

    // Now reference the 8-bit subreg of the result.
    ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
                                           /*Kill=*/true, X86::sub_8bit);
  }
  // Copy the result out of the physreg if we haven't already.
  if (!ResultReg) {
    ResultReg = createResultReg(TypeEntry.RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Copy), ResultReg)
      .addReg(OpEntry.DivRemResultReg);
  }
  updateValueMap(I, ResultReg);

  return true;
}

/// \brief Emit a conditional move instruction (if they are supported) to lower
/// the select.
bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
  // Check if the subtarget supports these instructions.
  if (!Subtarget->hasCMov())
    return false;

  // FIXME: Add support for i8.
  if (RetVT < MVT::i16 || RetVT > MVT::i64)
    return false;

  const Value *Cond = I->getOperand(0);
  const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
  bool NeedTest = true;
  X86::CondCode CC = X86::COND_NE;

  // Optimize conditions coming from a compare if both instructions are in the
  // same basic block (values defined in other basic blocks may not have
  // initialized registers).
  const auto *CI = dyn_cast<CmpInst>(Cond);
  if (CI && (CI->getParent() == I->getParent())) {
    CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);

    // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
    static unsigned SETFOpcTable[2][3] = {
      { X86::SETNPr, X86::SETEr,  X86::TEST8rr },
      { X86::SETPr,  X86::SETNEr, X86::OR8rr   }
    };
    unsigned *SETFOpc = nullptr;
    switch (Predicate) {
    default: break;
    case CmpInst::FCMP_OEQ:
      SETFOpc = &SETFOpcTable[0][0];
      Predicate = CmpInst::ICMP_NE;
      break;
    case CmpInst::FCMP_UNE:
      SETFOpc = &SETFOpcTable[1][0];
      Predicate = CmpInst::ICMP_NE;
      break;
    }

    bool NeedSwap;
    std::tie(CC, NeedSwap) = getX86ConditionCode(Predicate);
    assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");

    const Value *CmpLHS = CI->getOperand(0);
    const Value *CmpRHS = CI->getOperand(1);
    if (NeedSwap)
      std::swap(CmpLHS, CmpRHS);

    EVT CmpVT = TLI.getValueType(DL, CmpLHS->getType());
    // Emit a compare of the LHS and RHS, setting the flags.
    if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
      return false;

    if (SETFOpc) {
      unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
      unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[0]),
              FlagReg1);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[1]),
              FlagReg2);
      auto const &II = TII.get(SETFOpc[2]);
      if (II.getNumDefs()) {
        unsigned TmpReg = createResultReg(&X86::GR8RegClass);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, TmpReg)
          .addReg(FlagReg2).addReg(FlagReg1);
      } else {
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addReg(FlagReg2).addReg(FlagReg1);
      }
    }
    NeedTest = false;
  } else if (foldX86XALUIntrinsic(CC, I, Cond)) {
    // Fake request the condition, otherwise the intrinsic might be completely
    // optimized away.
    unsigned TmpReg = getRegForValue(Cond);
    if (TmpReg == 0)
      return false;

    NeedTest = false;
  }

  if (NeedTest) {
    // Selects operate on i1, however, CondReg is 8 bits wide and may contain
    // garbage. Indeed, only the least significant bit is supposed to be
    // accurate. If we read more than the lsb, we may see non-zero values
    // whereas the lsb is zero. Therefore, we have to truncate CondReg to i1
    // for the select. This is achieved by performing TEST against 1.
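    //
    // For "select i1 %c, i32 %a, i32 %b" the emitted sequence is roughly
    // (a sketch; registers are illustrative):
    //   testb $1, %cl
    //   cmovnel %eax, %ebx    ; pick %a when the lsb of %c is set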
    unsigned CondReg = getRegForValue(Cond);
    if (CondReg == 0)
      return false;
    bool CondIsKill = hasTrivialKill(Cond);

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
      .addReg(CondReg, getKillRegState(CondIsKill)).addImm(1);
  }

  const Value *LHS = I->getOperand(1);
  const Value *RHS = I->getOperand(2);

  unsigned RHSReg = getRegForValue(RHS);
  bool RHSIsKill = hasTrivialKill(RHS);

  unsigned LHSReg = getRegForValue(LHS);
  bool LHSIsKill = hasTrivialKill(LHS);

  if (!LHSReg || !RHSReg)
    return false;

  unsigned Opc = X86::getCMovFromCond(CC, RC->getSize());
  unsigned ResultReg = fastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
                                       LHSReg, LHSIsKill);
  updateValueMap(I, ResultReg);
  return true;
}

/// \brief Emit SSE or AVX instructions to lower the select.
///
/// Try to use SSE1/SSE2 instructions to simulate a select without branches.
/// This lowers fp selects into a CMP/AND/ANDN/OR sequence when the necessary
/// SSE instructions are available. If AVX is available, try to use a VBLENDV.
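///
/// A sketch of the non-AVX sequence for "select (fcmp olt %x, %y), %a, %b":
///   cmpltss %xmm1, %xmm0   ; mask = (x < y) ? all-ones : all-zeros
///   andps   %xmm0, %xmm2   ; mask & a
///   andnps  %xmm3, %xmm0   ; ~mask & b
///   orps    %xmm0, %xmm2   ; the two halves are disjoint, so OR blends them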
bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
  // Optimize conditions coming from a compare if both instructions are in the
  // same basic block (values defined in other basic blocks may not have
  // initialized registers).
  const auto *CI = dyn_cast<FCmpInst>(I->getOperand(0));
  if (!CI || (CI->getParent() != I->getParent()))
    return false;

  if (I->getType() != CI->getOperand(0)->getType() ||
      !((Subtarget->hasSSE1() && RetVT == MVT::f32) ||
        (Subtarget->hasSSE2() && RetVT == MVT::f64)))
    return false;

  const Value *CmpLHS = CI->getOperand(0);
  const Value *CmpRHS = CI->getOperand(1);
  CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);

  // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
  // We don't have to materialize a zero constant for this case and can just use
  // %x again on the RHS.
  if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
    const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
    if (CmpRHSC && CmpRHSC->isNullValue())
      CmpRHS = CmpLHS;
  }

  unsigned CC;
  bool NeedSwap;
  std::tie(CC, NeedSwap) = getX86SSEConditionCode(Predicate);
  if (CC > 7)
    return false;

  if (NeedSwap)
    std::swap(CmpLHS, CmpRHS);

  // Choose the SSE instruction sequence based on data type (float or double).
  static unsigned OpcTable[2][4] = {
    { X86::CMPSSrr, X86::FsANDPSrr, X86::FsANDNPSrr, X86::FsORPSrr },
    { X86::CMPSDrr, X86::FsANDPDrr, X86::FsANDNPDrr, X86::FsORPDrr }
  };

  unsigned *Opc = nullptr;
  switch (RetVT.SimpleTy) {
  default: return false;
  case MVT::f32: Opc = &OpcTable[0][0]; break;
  case MVT::f64: Opc = &OpcTable[1][0]; break;
  }

  const Value *LHS = I->getOperand(1);
  const Value *RHS = I->getOperand(2);

  unsigned LHSReg = getRegForValue(LHS);
  bool LHSIsKill = hasTrivialKill(LHS);

  unsigned RHSReg = getRegForValue(RHS);
  bool RHSIsKill = hasTrivialKill(RHS);

  unsigned CmpLHSReg = getRegForValue(CmpLHS);
  bool CmpLHSIsKill = hasTrivialKill(CmpLHS);

  unsigned CmpRHSReg = getRegForValue(CmpRHS);
  bool CmpRHSIsKill = hasTrivialKill(CmpRHS);

  if (!LHSReg || !RHSReg || !CmpLHSReg || !CmpRHSReg)
    return false;

  const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
  unsigned ResultReg;

  if (Subtarget->hasAVX()) {
    const TargetRegisterClass *FR32 = &X86::FR32RegClass;
    const TargetRegisterClass *VR128 = &X86::VR128RegClass;

    // If we have AVX, create 1 blendv instead of 3 logic instructions.
    // Blendv was introduced with SSE 4.1, but the 2 register form implicitly
    // uses XMM0 as the selection register. That may need just as many
    // instructions as the AND/ANDN/OR sequence due to register moves, so
    // don't bother.
    unsigned CmpOpcode =
      (RetVT.SimpleTy == MVT::f32) ? X86::VCMPSSrr : X86::VCMPSDrr;
    unsigned BlendOpcode =
      (RetVT.SimpleTy == MVT::f32) ? X86::VBLENDVPSrr : X86::VBLENDVPDrr;

    unsigned CmpReg = fastEmitInst_rri(CmpOpcode, FR32, CmpLHSReg, CmpLHSIsKill,
                                       CmpRHSReg, CmpRHSIsKill, CC);
    unsigned VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, RHSIsKill,
                                          LHSReg, LHSIsKill, CmpReg, true);
    ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(VBlendReg);
  } else {
    unsigned CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
                                       CmpRHSReg, CmpRHSIsKill, CC);
    unsigned AndReg = fastEmitInst_rr(Opc[1], RC, CmpReg, /*IsKill=*/false,
                                      LHSReg, LHSIsKill);
    unsigned AndNReg = fastEmitInst_rr(Opc[2], RC, CmpReg, /*IsKill=*/true,
                                       RHSReg, RHSIsKill);
    ResultReg = fastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true,
                                AndReg, /*IsKill=*/true);
  }
  updateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
  // These are pseudo CMOV instructions and will be later expanded into
  // control-flow.
  unsigned Opc;
  switch (RetVT.SimpleTy) {
  default: return false;
  case MVT::i8:  Opc = X86::CMOV_GR8;  break;
  case MVT::i16: Opc = X86::CMOV_GR16; break;
  case MVT::i32: Opc = X86::CMOV_GR32; break;
  case MVT::f32: Opc = X86::CMOV_FR32; break;
  case MVT::f64: Opc = X86::CMOV_FR64; break;
  }

  const Value *Cond = I->getOperand(0);
  X86::CondCode CC = X86::COND_NE;

  // Optimize conditions coming from a compare if both instructions are in the
  // same basic block (values defined in other basic blocks may not have
  // initialized registers).
  const auto *CI = dyn_cast<CmpInst>(Cond);
  if (CI && (CI->getParent() == I->getParent())) {
    bool NeedSwap;
    std::tie(CC, NeedSwap) = getX86ConditionCode(CI->getPredicate());
    if (CC > X86::LAST_VALID_COND)
      return false;

    const Value *CmpLHS = CI->getOperand(0);
    const Value *CmpRHS = CI->getOperand(1);

    if (NeedSwap)
      std::swap(CmpLHS, CmpRHS);

    EVT CmpVT = TLI.getValueType(DL, CmpLHS->getType());
    if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
      return false;
  } else {
    unsigned CondReg = getRegForValue(Cond);
    if (CondReg == 0)
      return false;
    bool CondIsKill = hasTrivialKill(Cond);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
      .addReg(CondReg, getKillRegState(CondIsKill)).addImm(1);
  }

  const Value *LHS = I->getOperand(1);
  const Value *RHS = I->getOperand(2);

  unsigned LHSReg = getRegForValue(LHS);
  bool LHSIsKill = hasTrivialKill(LHS);

  unsigned RHSReg = getRegForValue(RHS);
  bool RHSIsKill = hasTrivialKill(RHS);

  if (!LHSReg || !RHSReg)
    return false;

  const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);

  unsigned ResultReg =
    fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
  updateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectSelect(const Instruction *I) {
  MVT RetVT;
  if (!isTypeLegal(I->getType(), RetVT))
    return false;

  // Check if we can fold the select.
  if (const auto *CI = dyn_cast<CmpInst>(I->getOperand(0))) {
    CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
    const Value *Opnd = nullptr;
    switch (Predicate) {
    default: break;
    case CmpInst::FCMP_FALSE: Opnd = I->getOperand(2); break;
    case CmpInst::FCMP_TRUE:  Opnd = I->getOperand(1); break;
    }
    // No need for a select anymore - this is an unconditional move.
    if (Opnd) {
      unsigned OpReg = getRegForValue(Opnd);
      if (OpReg == 0)
        return false;
      bool OpIsKill = hasTrivialKill(Opnd);
      const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
      unsigned ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(OpReg, getKillRegState(OpIsKill));
      updateValueMap(I, ResultReg);
      return true;
    }
  }

  // First try to use real conditional move instructions.
  if (X86FastEmitCMoveSelect(RetVT, I))
    return true;

  // Try to use a sequence of SSE instructions to simulate a conditional move.
  if (X86FastEmitSSESelect(RetVT, I))
    return true;

  // Fall back to pseudo conditional move instructions, which will be later
  // converted to control-flow.
  if (X86FastEmitPseudoSelect(RetVT, I))
    return true;

  return false;
}

bool X86FastISel::X86SelectSIToFP(const Instruction *I) {
  // The target-independent selection algorithm in FastISel already knows how
  // to select a SINT_TO_FP if the target is SSE but not AVX.
  // Early exit if the subtarget doesn't have AVX.
  if (!Subtarget->hasAVX())
    return false;

  if (!I->getOperand(0)->getType()->isIntegerTy(32))
    return false;

  // Select integer to float/double conversion.
  unsigned OpReg = getRegForValue(I->getOperand(0));
  if (OpReg == 0)
    return false;

  const TargetRegisterClass *RC = nullptr;
  unsigned Opcode;

  if (I->getType()->isDoubleTy()) {
    // sitofp int -> double
    Opcode = X86::VCVTSI2SDrr;
    RC = &X86::FR64RegClass;
  } else if (I->getType()->isFloatTy()) {
    // sitofp int -> float
    Opcode = X86::VCVTSI2SSrr;
    RC = &X86::FR32RegClass;
  } else
    return false;

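  // VCVTSI2SS/SD also reads an XMM operand that supplies the upper vector
  // lanes of the destination; since only the scalar result matters here, we
  // can feed it an IMPLICIT_DEF. A sketch for "sitofp i32 %x to float":
  //   %imp = IMPLICIT_DEF
  //   vcvtsi2ssl %x, %imp, %dst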
  unsigned ImplicitDefReg = createResultReg(RC);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
  unsigned ResultReg =
    fastEmitInst_rr(Opcode, RC, ImplicitDefReg, true, OpReg, false);
  updateValueMap(I, ResultReg);
  return true;
}

// Helper method used by X86SelectFPExt and X86SelectFPTrunc.
bool X86FastISel::X86SelectFPExtOrFPTrunc(const Instruction *I,
                                          unsigned TargetOpc,
                                          const TargetRegisterClass *RC) {
  assert((I->getOpcode() == Instruction::FPExt ||
          I->getOpcode() == Instruction::FPTrunc) &&
         "Instruction must be an FPExt or FPTrunc!");

  unsigned OpReg = getRegForValue(I->getOperand(0));
  if (OpReg == 0)
    return false;

  unsigned ResultReg = createResultReg(RC);
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpc),
                ResultReg);
  if (Subtarget->hasAVX())
    MIB.addReg(OpReg);
  MIB.addReg(OpReg);
  updateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectFPExt(const Instruction *I) {
  if (X86ScalarSSEf64 && I->getType()->isDoubleTy() &&
      I->getOperand(0)->getType()->isFloatTy()) {
    // fpext from float to double.
    unsigned Opc = Subtarget->hasAVX() ? X86::VCVTSS2SDrr : X86::CVTSS2SDrr;
    return X86SelectFPExtOrFPTrunc(I, Opc, &X86::FR64RegClass);
  }

  return false;
}

bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
  if (X86ScalarSSEf64 && I->getType()->isFloatTy() &&
      I->getOperand(0)->getType()->isDoubleTy()) {
    // fptrunc from double to float.
    unsigned Opc = Subtarget->hasAVX() ? X86::VCVTSD2SSrr : X86::CVTSD2SSrr;
    return X86SelectFPExtOrFPTrunc(I, Opc, &X86::FR32RegClass);
  }

  return false;
}

bool X86FastISel::X86SelectTrunc(const Instruction *I) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  // This code only handles truncation to byte.
  if (DstVT != MVT::i8 && DstVT != MVT::i1)
    return false;
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  if (SrcVT == MVT::i8) {
    // Truncate from i8 to i1; no code needed.
    updateValueMap(I, InputReg);
    return true;
  }

  bool KillInputReg = false;
  if (!Subtarget->is64Bit()) {
    // If we're on x86-32, we can't extract an i8 from a general register.
    // First issue a copy to GR16_ABCD or GR32_ABCD.
    const TargetRegisterClass *CopyRC =
      (SrcVT == MVT::i16) ? &X86::GR16_ABCDRegClass : &X86::GR32_ABCDRegClass;
    unsigned CopyReg = createResultReg(CopyRC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), CopyReg).addReg(InputReg);
    InputReg = CopyReg;
    KillInputReg = true;
  }

  // Issue an extract_subreg.
  unsigned ResultReg = fastEmitInst_extractsubreg(MVT::i8,
                                                  InputReg, KillInputReg,
                                                  X86::sub_8bit);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::IsMemcpySmall(uint64_t Len) {
  return Len <= (Subtarget->is64Bit() ? 32 : 16);
}

bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
                                     X86AddressMode SrcAM, uint64_t Len) {

  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!IsMemcpySmall(Len))
    return false;

  bool i64Legal = Subtarget->is64Bit();

  // We don't care about alignment here since we just emit integer accesses.
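  // The loop below greedily picks the widest legal access each iteration; for
  // example, Len == 7 on x86-64 becomes one i32, one i16, and one i8
  // load/store pair.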
  while (Len) {
    MVT VT;
    if (Len >= 8 && i64Legal)
      VT = MVT::i64;
    else if (Len >= 4)
      VT = MVT::i32;
    else if (Len >= 2)
      VT = MVT::i16;
    else
      VT = MVT::i8;

    unsigned Reg;
    bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg);
    RV &= X86FastEmitStore(VT, Reg, /*Kill=*/true, DestAM);
    assert(RV && "Failed to emit load or store??");

    unsigned Size = VT.getSizeInBits()/8;
    Len -= Size;
    DestAM.Disp += Size;
    SrcAM.Disp += Size;
  }

  return true;
}

bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
  // FIXME: Handle more intrinsics.
  switch (II->getIntrinsicID()) {
  default: return false;
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16: {
    if (Subtarget->useSoftFloat() || !Subtarget->hasF16C())
      return false;

    const Value *Op = II->getArgOperand(0);
    unsigned InputReg = getRegForValue(Op);
    if (InputReg == 0)
      return false;

    // F16C only allows converting from float to half and from half to float.
    bool IsFloatToHalf = II->getIntrinsicID() == Intrinsic::convert_to_fp16;
    if (IsFloatToHalf) {
      if (!Op->getType()->isFloatTy())
        return false;
    } else {
      if (!II->getType()->isFloatTy())
        return false;
    }

    unsigned ResultReg = 0;
    const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::v8i16);
    if (IsFloatToHalf) {
      // 'InputReg' is implicitly promoted from register class FR32 to
      // register class VR128 by method 'constrainOperandRegClass' which is
      // directly called by 'fastEmitInst_ri'.
      // Instruction VCVTPS2PHrr takes an extra immediate operand which is
      // used to provide rounding control: use MXCSR.RC, encoded as 0b100.
      // It's consistent with the other FP instructions, which are usually
      // controlled by MXCSR.
      InputReg = fastEmitInst_ri(X86::VCVTPS2PHrr, RC, InputReg, false, 4);

      // Move the lower 32-bits of ResultReg to another register of class GR32.
      ResultReg = createResultReg(&X86::GR32RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(X86::VMOVPDI2DIrr), ResultReg)
        .addReg(InputReg, RegState::Kill);

      // The result value is in the lower 16-bits of ResultReg.
      unsigned RegIdx = X86::sub_16bit;
      ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, true, RegIdx);
    } else {
      assert(Op->getType()->isIntegerTy(16) && "Expected a 16-bit integer!");
      // Explicitly sign-extend the input to 32-bit.
      InputReg = fastEmit_r(MVT::i16, MVT::i32, ISD::SIGN_EXTEND, InputReg,
                            /*Kill=*/false);

      // The following SCALAR_TO_VECTOR will be expanded into a VMOVDI2PDIrr.
      InputReg = fastEmit_r(MVT::i32, MVT::v4i32, ISD::SCALAR_TO_VECTOR,
                            InputReg, /*Kill=*/true);

      InputReg = fastEmitInst_r(X86::VCVTPH2PSrr, RC, InputReg, /*Kill=*/true);

      // The result value is in the lower 32-bits of ResultReg.
      // Emit an explicit copy from register class VR128 to register class FR32.
      ResultReg = createResultReg(&X86::FR32RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(InputReg, RegState::Kill);
    }

    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::frameaddress: {
    MachineFunction *MF = FuncInfo.MF;
    if (MF->getTarget().getMCAsmInfo()->usesWindowsCFI())
      return false;

    Type *RetTy = II->getCalledFunction()->getReturnType();

    MVT VT;
    if (!isTypeLegal(RetTy, VT))
      return false;

    unsigned Opc;
    const TargetRegisterClass *RC = nullptr;

    switch (VT.SimpleTy) {
    default: llvm_unreachable("Invalid result type for frameaddress.");
    case MVT::i32: Opc = X86::MOV32rm; RC = &X86::GR32RegClass; break;
    case MVT::i64: Opc = X86::MOV64rm; RC = &X86::GR64RegClass; break;
    }

    // This needs to be set before we call getPtrSizedFrameRegister, otherwise
    // we get the wrong frame register.
    MachineFrameInfo *MFI = MF->getFrameInfo();
    MFI->setFrameAddressIsTaken(true);

    const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
    unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(*MF);
    assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
            (FrameReg == X86::EBP && VT == MVT::i32)) &&
           "Invalid Frame Register!");

    // Always make a copy of the frame register to a vreg first, so that we
    // never directly reference the frame register (the TwoAddressInstruction-
    // Pass doesn't like that).
    unsigned SrcReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), SrcReg).addReg(FrameReg);

    // Now recursively load from the frame address.
    // movq (%rbp), %rax
    // movq (%rax), %rax
    // movq (%rax), %rax
    // ...
    unsigned DestReg;
    unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
    while (Depth--) {
      DestReg = createResultReg(RC);
      addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                           TII.get(Opc), DestReg), SrcReg);
      SrcReg = DestReg;
    }

    updateValueMap(II, SrcReg);
    return true;
  }
  case Intrinsic::memcpy: {
    const MemCpyInst *MCI = cast<MemCpyInst>(II);
    // Don't handle volatile or variable length memcpys.
    if (MCI->isVolatile())
      return false;

    if (isa<ConstantInt>(MCI->getLength())) {
      // Small memcpy's are common enough that we want to do them
      // without a call if possible.
      uint64_t Len = cast<ConstantInt>(MCI->getLength())->getZExtValue();
      if (IsMemcpySmall(Len)) {
        X86AddressMode DestAM, SrcAM;
        if (!X86SelectAddress(MCI->getRawDest(), DestAM) ||
            !X86SelectAddress(MCI->getRawSource(), SrcAM))
          return false;
        TryEmitSmallMemcpy(DestAM, SrcAM, Len);
        return true;
      }
    }

    unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
    if (!MCI->getLength()->getType()->isIntegerTy(SizeWidth))
      return false;

    if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
      return false;

    return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 2);
  }
  case Intrinsic::memset: {
    const MemSetInst *MSI = cast<MemSetInst>(II);

    if (MSI->isVolatile())
      return false;

    unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
    if (!MSI->getLength()->getType()->isIntegerTy(SizeWidth))
      return false;

    if (MSI->getDestAddressSpace() > 255)
      return false;

    return lowerCallTo(II, "memset", II->getNumArgOperands() - 2);
  }
  case Intrinsic::stackprotector: {
    // Emit code to store the stack guard onto the stack.
    EVT PtrTy = TLI.getPointerTy(DL);

    const Value *Op1 = II->getArgOperand(0); // The guard's value.
    const AllocaInst *Slot = cast<AllocaInst>(II->getArgOperand(1));

    MFI.setStackProtectorIndex(FuncInfo.StaticAllocaMap[Slot]);

    // Grab the frame index.
    X86AddressMode AM;
    if (!X86SelectAddress(Slot, AM)) return false;
    if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;
    return true;
  }
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    X86AddressMode AM;
    assert(DI->getAddress() && "Null address should be checked earlier!");
    if (!X86SelectAddress(DI->getAddress(), AM))
      return false;
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    // FIXME may need to add RegState::Debug to any registers produced,
    // although ESP/EBP should be the only ones at the moment.
    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
           "Expected inlined-at fields to agree");
    addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II), AM)
      .addImm(0)
      .addMetadata(DI->getVariable())
      .addMetadata(DI->getExpression());
    return true;
  }
  case Intrinsic::trap: {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TRAP));
    return true;
  }
  case Intrinsic::sqrt: {
    if (!Subtarget->hasSSE1())
      return false;

    Type *RetTy = II->getCalledFunction()->getReturnType();

    MVT VT;
    if (!isTypeLegal(RetTy, VT))
      return false;

    // Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT
    // is not generated by FastISel yet.
    // FIXME: Update this code once tablegen can handle it.
    static const uint16_t SqrtOpc[2][2] = {
      {X86::SQRTSSr, X86::VSQRTSSr},
      {X86::SQRTSDr, X86::VSQRTSDr}
    };
    bool HasAVX = Subtarget->hasAVX();
    unsigned Opc;
    const TargetRegisterClass *RC;
    switch (VT.SimpleTy) {
    default: return false;
    case MVT::f32: Opc = SqrtOpc[0][HasAVX]; RC = &X86::FR32RegClass; break;
    case MVT::f64: Opc = SqrtOpc[1][HasAVX]; RC = &X86::FR64RegClass; break;
    }

    const Value *SrcVal = II->getArgOperand(0);
    unsigned SrcReg = getRegForValue(SrcVal);

    if (SrcReg == 0)
      return false;

    unsigned ImplicitDefReg = 0;
    if (HasAVX) {
      ImplicitDefReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
    }

    unsigned ResultReg = createResultReg(RC);
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
                  ResultReg);

    if (ImplicitDefReg)
      MIB.addReg(ImplicitDefReg);

    MIB.addReg(SrcReg);

    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow: {
    // This implements the basic lowering of the xalu with overflow intrinsics
    // into add/sub/mul followed by either seto or setb.
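    //
    // For example, "@llvm.uadd.with.overflow.i32" is emitted roughly as
    // (a sketch):
    //   addl %esi, %edi     ; first result: the (wrapped) sum
    //   setb %al            ; second result: CF == 1 means unsigned overflow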
    const Function *Callee = II->getCalledFunction();
    auto *Ty = cast<StructType>(Callee->getReturnType());
    Type *RetTy = Ty->getTypeAtIndex(0U);
    Type *CondTy = Ty->getTypeAtIndex(1);

    MVT VT;
    if (!isTypeLegal(RetTy, VT))
      return false;

    if (VT < MVT::i8 || VT > MVT::i64)
      return false;

    const Value *LHS = II->getArgOperand(0);
    const Value *RHS = II->getArgOperand(1);

    // Canonicalize immediate to the RHS.
    if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&
        isCommutativeIntrinsic(II))
      std::swap(LHS, RHS);

    bool UseIncDec = false;
    if (isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isOne())
      UseIncDec = true;

    unsigned BaseOpc, CondOpc;
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("Unexpected intrinsic!");
    case Intrinsic::sadd_with_overflow:
      BaseOpc = UseIncDec ? unsigned(X86ISD::INC) : unsigned(ISD::ADD);
      CondOpc = X86::SETOr;
      break;
    case Intrinsic::uadd_with_overflow:
      BaseOpc = ISD::ADD; CondOpc = X86::SETBr; break;
    case Intrinsic::ssub_with_overflow:
      BaseOpc = UseIncDec ? unsigned(X86ISD::DEC) : unsigned(ISD::SUB);
      CondOpc = X86::SETOr;
      break;
    case Intrinsic::usub_with_overflow:
      BaseOpc = ISD::SUB; CondOpc = X86::SETBr; break;
    case Intrinsic::smul_with_overflow:
      BaseOpc = X86ISD::SMUL; CondOpc = X86::SETOr; break;
    case Intrinsic::umul_with_overflow:
      BaseOpc = X86ISD::UMUL; CondOpc = X86::SETOr; break;
    }

    unsigned LHSReg = getRegForValue(LHS);
    if (LHSReg == 0)
      return false;
    bool LHSIsKill = hasTrivialKill(LHS);

    unsigned ResultReg = 0;
    // Check if we have an immediate version.
    if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
      static const uint16_t Opc[2][4] = {
        { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r },
        { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r }
      };

      if (BaseOpc == X86ISD::INC || BaseOpc == X86ISD::DEC) {
        ResultReg = createResultReg(TLI.getRegClassFor(VT));
        bool IsDec = BaseOpc == X86ISD::DEC;
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
          .addReg(LHSReg, getKillRegState(LHSIsKill));
      } else
        ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
                                CI->getZExtValue());
    }

    unsigned RHSReg;
    bool RHSIsKill;
    if (!ResultReg) {
      RHSReg = getRegForValue(RHS);
      if (RHSReg == 0)
        return false;
      RHSIsKill = hasTrivialKill(RHS);
      ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
                              RHSIsKill);
    }

    // FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit
    // it manually.
    if (BaseOpc == X86ISD::UMUL && !ResultReg) {
      static const uint16_t MULOpc[] =
        { X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r };
      static const MCPhysReg Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX };
      // First copy the first operand into RAX, which is an implicit input to
      // the X86::MUL*r instruction.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])
        .addReg(LHSReg, getKillRegState(LHSIsKill));
      ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
                                 TLI.getRegClassFor(VT), RHSReg, RHSIsKill);
    } else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
      static const uint16_t MULOpc[] =
        { X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr };
      if (VT == MVT::i8) {
        // Copy the first operand into AL, which is an implicit input to the
        // X86::IMUL8r instruction.
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::COPY), X86::AL)
          .addReg(LHSReg, getKillRegState(LHSIsKill));
        ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
                                   RHSIsKill);
      } else
        ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
                                    TLI.getRegClassFor(VT), LHSReg, LHSIsKill,
                                    RHSReg, RHSIsKill);
    }

    if (!ResultReg)
      return false;

    unsigned ResultReg2 = FuncInfo.CreateRegs(CondTy);
    assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers.");
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CondOpc),
            ResultReg2);

    updateValueMap(II, ResultReg, 2);
    return true;
  }
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64: {
    bool IsInputDouble;
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("Unexpected intrinsic.");
    case Intrinsic::x86_sse_cvttss2si:
    case Intrinsic::x86_sse_cvttss2si64:
      if (!Subtarget->hasSSE1())
        return false;
      IsInputDouble = false;
      break;
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse2_cvttsd2si64:
      if (!Subtarget->hasSSE2())
        return false;
      IsInputDouble = true;
      break;
    }

    Type *RetTy = II->getCalledFunction()->getReturnType();
    MVT VT;
    if (!isTypeLegal(RetTy, VT))
      return false;

    static const uint16_t CvtOpc[2][2][2] = {
      { { X86::CVTTSS2SIrr,   X86::VCVTTSS2SIrr   },
        { X86::CVTTSS2SI64rr, X86::VCVTTSS2SI64rr } },
      { { X86::CVTTSD2SIrr,   X86::VCVTTSD2SIrr   },
        { X86::CVTTSD2SI64rr, X86::VCVTTSD2SI64rr } }
    };
    bool HasAVX = Subtarget->hasAVX();
    unsigned Opc;
    switch (VT.SimpleTy) {
    default: llvm_unreachable("Unexpected result type.");
    case MVT::i32: Opc = CvtOpc[IsInputDouble][0][HasAVX]; break;
    case MVT::i64: Opc = CvtOpc[IsInputDouble][1][HasAVX]; break;
    }

    // Check if we can fold insertelement instructions into the convert.
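    // For example (a sketch), in
    //   %v = insertelement <4 x float> undef, float %x, i32 0
    //   %r = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %v)
    // the scalar %x can feed the convert directly, because only element 0 of
    // the vector operand is read.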
    const Value *Op = II->getArgOperand(0);
    while (auto *IE = dyn_cast<InsertElementInst>(Op)) {
      const Value *Index = IE->getOperand(2);
      if (!isa<ConstantInt>(Index))
        break;
      unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();

      if (Idx == 0) {
        Op = IE->getOperand(1);
        break;
      }
      Op = IE->getOperand(0);
    }

    unsigned Reg = getRegForValue(Op);
    if (Reg == 0)
      return false;

    unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
      .addReg(Reg);

    updateValueMap(II, ResultReg);
    return true;
  }
  }
}
2881
bool X86FastISel::fastLowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    return false;

  const Function *F = FuncInfo.Fn;
  if (F->isVarArg())
    return false;

  CallingConv::ID CC = F->getCallingConv();
  if (CC != CallingConv::C)
    return false;

  if (Subtarget->isCallingConvWin64(CC))
    return false;

  if (!Subtarget->is64Bit())
    return false;

  // Only handle simple cases, i.e. up to 6 i32/i64 scalar arguments.
  unsigned GPRCnt = 0;
  unsigned FPRCnt = 0;
  unsigned Idx = 0;
  for (auto const &Arg : F->args()) {
    // The first argument is at index 1.
    ++Idx;
    if (F->getAttributes().hasAttribute(Idx, Attribute::ByVal) ||
        F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
        F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
        F->getAttributes().hasAttribute(Idx, Attribute::SwiftSelf) ||
        F->getAttributes().hasAttribute(Idx, Attribute::SwiftError) ||
        F->getAttributes().hasAttribute(Idx, Attribute::Nest))
      return false;

    Type *ArgTy = Arg.getType();
    if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
      return false;

    EVT ArgVT = TLI.getValueType(DL, ArgTy);
    if (!ArgVT.isSimple()) return false;
    switch (ArgVT.getSimpleVT().SimpleTy) {
    default: return false;
    case MVT::i32:
    case MVT::i64:
      ++GPRCnt;
      break;
    case MVT::f32:
    case MVT::f64:
      if (!Subtarget->hasSSE1())
        return false;
      ++FPRCnt;
      break;
    }

    if (GPRCnt > 6)
      return false;

    if (FPRCnt > 8)
      return false;
  }

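  // Argument registers in the SysV AMD64 ABI order: the first six integer
  // arguments go in (E/R)DI, (E/R)SI, (E/R)DX, (E/R)CX, R8 and R9, and the
  // first eight FP arguments in XMM0-XMM7.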
  static const MCPhysReg GPR32ArgRegs[] = {
    X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
  };
  static const MCPhysReg GPR64ArgRegs[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9
  };
  static const MCPhysReg XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };

  unsigned GPRIdx = 0;
  unsigned FPRIdx = 0;
  for (auto const &Arg : F->args()) {
    MVT VT = TLI.getSimpleValueType(DL, Arg.getType());
    const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
    unsigned SrcReg;
    switch (VT.SimpleTy) {
    default: llvm_unreachable("Unexpected value type.");
    case MVT::i32: SrcReg = GPR32ArgRegs[GPRIdx++]; break;
    case MVT::i64: SrcReg = GPR64ArgRegs[GPRIdx++]; break;
    case MVT::f32: // fall-through
    case MVT::f64: SrcReg = XMMArgRegs[FPRIdx++]; break;
    }
    unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
    // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
    // Without this, EmitLiveInCopies may eliminate the livein if its only
    // use is a bitcast (which isn't turned into an instruction).
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg)
      .addReg(DstReg, getKillRegState(true));
    updateValueMap(&Arg, ResultReg);
  }
  return true;
}

static unsigned computeBytesPoppedByCallee(const X86Subtarget *Subtarget,
                                           CallingConv::ID CC,
                                           ImmutableCallSite *CS) {
  if (Subtarget->is64Bit())
    return 0;
  if (Subtarget->getTargetTriple().isOSMSVCRT())
    return 0;
  if (CC == CallingConv::Fast || CC == CallingConv::GHC ||
      CC == CallingConv::HiPE)
    return 0;

  if (CS)
    if (CS->arg_empty() || !CS->paramHasAttr(1, Attribute::StructRet) ||
        CS->paramHasAttr(1, Attribute::InReg) || Subtarget->isTargetMCU())
      return 0;

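  // Otherwise the callee pops the hidden 4-byte sret pointer on return,
  // i.e. it returns with "ret $4".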
  return 4;
}

bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
  auto &OutVals       = CLI.OutVals;
  auto &OutFlags      = CLI.OutFlags;
  auto &OutRegs       = CLI.OutRegs;
  auto &Ins           = CLI.Ins;
  auto &InRegs        = CLI.InRegs;
  CallingConv::ID CC  = CLI.CallConv;
  bool &IsTailCall    = CLI.IsTailCall;
  bool IsVarArg       = CLI.IsVarArg;
  const Value *Callee = CLI.Callee;
  MCSymbol *Symbol = CLI.Symbol;

  bool Is64Bit        = Subtarget->is64Bit();
  bool IsWin64        = Subtarget->isCallingConvWin64(CC);

  // Handle only C, fastcc, webkit_js, swift, and the x86
  // fastcall/Win64/SysV calling conventions for now.
  switch (CC) {
  default: return false;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::WebKit_JS:
  case CallingConv::Swift:
  case CallingConv::X86_FastCall:
  case CallingConv::X86_64_Win64:
  case CallingConv::X86_64_SysV:
    break;
  }

  // Allow SelectionDAG isel to handle tail calls.
  if (IsTailCall)
    return false;

  // fastcc with -tailcallopt is intended to provide a guaranteed
  // tail call optimization. FastISel doesn't know how to do that.
  if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)
    return false;

  // Don't know how to handle Win64 varargs yet. Nothing special needed for
  // x86-32. Special handling for x86-64 is implemented.
  if (IsVarArg && IsWin64)
    return false;

  // Don't know about inalloca yet.
  if (CLI.CS && CLI.CS->hasInAllocaArgument())
    return false;

  for (auto Flag : CLI.OutFlags)
    if (Flag.isSwiftError())
      return false;

  // Fast-isel doesn't know about callee-pop yet.
  if (X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg,
                       TM.Options.GuaranteedTailCallOpt))
    return false;

  SmallVector<MVT, 16> OutVTs;
  SmallVector<unsigned, 16> ArgRegs;

  // If this is a constant i1/i8/i16 argument, promote to i32 to avoid an extra
  // instruction. This is safe because it is common to all FastISel supported
  // calling conventions on x86.
  for (int i = 0, e = OutVals.size(); i != e; ++i) {
    Value *&Val = OutVals[i];
    ISD::ArgFlagsTy Flags = OutFlags[i];
    if (auto *CI = dyn_cast<ConstantInt>(Val)) {
      if (CI->getBitWidth() < 32) {
        if (Flags.isSExt())
          Val = ConstantExpr::getSExt(CI, Type::getInt32Ty(CI->getContext()));
        else
          Val = ConstantExpr::getZExt(CI, Type::getInt32Ty(CI->getContext()));
      }
    }

    // Passing bools around ends up doing a trunc to i1 and passing it.
    // Codegen this as an argument + "and 1".
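    // e.g. for
    //   %b = trunc i32 %x to i1
    //   call void @f(i1 %b)
    // we emit an "and $1" of the register holding %x and pass that result
    // instead of materializing the i1 separately.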
    MVT VT;
    auto *TI = dyn_cast<TruncInst>(Val);
    unsigned ResultReg;
    if (TI && TI->getType()->isIntegerTy(1) && CLI.CS &&
        (TI->getParent() == CLI.CS->getInstruction()->getParent()) &&
        TI->hasOneUse()) {
      Value *PrevVal = TI->getOperand(0);
      ResultReg = getRegForValue(PrevVal);

      if (!ResultReg)
        return false;

      if (!isTypeLegal(PrevVal->getType(), VT))
        return false;

      ResultReg =
        fastEmit_ri(VT, VT, ISD::AND, ResultReg, hasTrivialKill(PrevVal), 1);
    } else {
      if (!isTypeLegal(Val->getType(), VT))
        return false;
      ResultReg = getRegForValue(Val);
    }

    if (!ResultReg)
      return false;

    ArgRegs.push_back(ResultReg);
    OutVTs.push_back(VT);
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, CLI.RetTy->getContext());

  // Allocate shadow area for Win64
  if (IsWin64)
    CCInfo.AllocateStack(32, 8);
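  // The 32 bytes are the four 8-byte "home" slots that the Win64 ABI requires
  // the caller to reserve for the register arguments RCX, RDX, R8 and R9.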

  CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
    .addImm(NumBytes).addImm(0);

  // Walk the register/memloc assignments, inserting copies/loads.
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign const &VA = ArgLocs[i];
    const Value *ArgVal = OutVals[VA.getValNo()];
    MVT ArgVT = OutVTs[VA.getValNo()];

    if (ArgVT == MVT::x86mmx)
      return false;

    unsigned ArgReg = ArgRegs[VA.getValNo()];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt: {
      assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
             "Unexpected extend");

      if (ArgVT.SimpleTy == MVT::i1)
        return false;

      bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
                                       ArgVT, ArgReg);
      assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::ZExt: {
      assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
             "Unexpected extend");

      // Handle zero-extension from i1 to i8, which is common.
      if (ArgVT.SimpleTy == MVT::i1) {
        // Set the high bits to zero.
        ArgReg = fastEmitZExtFromI1(MVT::i8, ArgReg, /*TODO: Kill=*/false);
        ArgVT = MVT::i8;

        if (ArgReg == 0)
          return false;
      }

      bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
                                       ArgVT, ArgReg);
      assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::AExt: {
      assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
             "Unexpected extend");
      bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(), ArgReg,
                                       ArgVT, ArgReg);
      if (!Emitted)
        Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
                                    ArgVT, ArgReg);
      if (!Emitted)
        Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
                                    ArgVT, ArgReg);

      assert(Emitted && "Failed to emit an aext!"); (void)Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::BCvt: {
      ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,
                          /*TODO: Kill=*/false);
      assert(ArgReg && "Failed to emit a bitcast!");
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::VExt:
      // VExt has not been implemented, so this should be impossible to reach
      // for now. However, fallback to Selection DAG isel once implemented.
      return false;
    case CCValAssign::AExtUpper:
    case CCValAssign::SExtUpper:
    case CCValAssign::ZExtUpper:
    case CCValAssign::FPExt:
      llvm_unreachable("Unexpected loc info!");
    case CCValAssign::Indirect:
      // FIXME: Indirect doesn't need extending, but fast-isel doesn't fully
      // support this.
      return false;
    }

    if (VA.isRegLoc()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
      OutRegs.push_back(VA.getLocReg());
    } else {
      assert(VA.isMemLoc());

      // Don't emit stores for undef values.
      if (isa<UndefValue>(ArgVal))
        continue;

      unsigned LocMemOffset = VA.getLocMemOffset();
      X86AddressMode AM;
      AM.Base.Reg = RegInfo->getStackRegister();
      AM.Disp = LocMemOffset;
      ISD::ArgFlagsTy Flags = OutFlags[VA.getValNo()];
      unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType());
      MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
          MachinePointerInfo::getStack(*FuncInfo.MF, LocMemOffset),
          MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
      if (Flags.isByVal()) {
        X86AddressMode SrcAM;
        SrcAM.Base.Reg = ArgReg;
        if (!TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize()))
          return false;
      } else if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal)) {
        // If this is a really simple value, emit this with the Value* version
        // of X86FastEmitStore. If it isn't simple, we don't want to do this,
        // as it can cause us to reevaluate the argument.
        if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO))
          return false;
      } else {
        bool ValIsKill = hasTrivialKill(ArgVal);
        if (!X86FastEmitStore(ArgVT, ArgReg, ValIsKill, AM, MMO))
          return false;
      }
    }
  }

  // ELF / PIC requires the GOT pointer to be set up in EBX before making a
  // function call via the PLT.
  if (Subtarget->isPICStyleGOT()) {
    unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base);
  }

  if (Is64Bit && IsVarArg && !IsWin64) {
    // From AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...) in
    // the declaration) %al is used as hidden argument to specify the number
    // of SSE registers used. The contents of %al do not need to match exactly
    // the number of registers, but must be an upper bound on the number of SSE
    // registers used and is in the range 0 - 8 inclusive.

    // Count the number of XMM registers allocated.
    static const MCPhysReg XMMArgRegs[] = {
      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    };
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
    assert((Subtarget->hasSSE1() || !NumXMMRegs)
           && "SSE registers cannot be used when SSE is disabled");
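    // e.g. printf("%f\n", x) passes one double in %xmm0, so this emits
    // "movb $1, %al" right before the call instruction.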
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),
            X86::AL).addImm(NumXMMRegs);
  }

  // Materialize callee address in a register. FIXME: GV address can be
  // handled with a CALLpcrel32 instead.
  X86AddressMode CalleeAM;
  if (!X86SelectCallAddress(Callee, CalleeAM))
    return false;

  unsigned CalleeOp = 0;
  const GlobalValue *GV = nullptr;
  if (CalleeAM.GV != nullptr) {
    GV = CalleeAM.GV;
  } else if (CalleeAM.Base.Reg != 0) {
    CalleeOp = CalleeAM.Base.Reg;
  } else
    return false;

  // Issue the call.
  MachineInstrBuilder MIB;
  if (CalleeOp) {
    // Register-indirect call.
    unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc))
      .addReg(CalleeOp);
  } else {
    // Direct call.
    assert(GV && "Not a direct call");
    unsigned CallOpc = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;

    // See if we need any target-specific flags on the GV operand.
    unsigned char OpFlags = Subtarget->classifyGlobalFunctionReference(GV);
    // Ignore NonLazyBind attribute in FastISel
    if (OpFlags == X86II::MO_GOTPCREL)
      OpFlags = 0;

    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc));
    if (Symbol)
      MIB.addSym(Symbol, OpFlags);
    else
      MIB.addGlobalAddress(GV, 0, OpFlags);
  }

  // Add a register mask operand representing the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));

  // Add an implicit use of the GOT pointer in EBX.
  if (Subtarget->isPICStyleGOT())
    MIB.addReg(X86::EBX, RegState::Implicit);

  if (Is64Bit && IsVarArg && !IsWin64)
    MIB.addReg(X86::AL, RegState::Implicit);

  // Add implicit physical register uses to the call.
  for (auto Reg : OutRegs)
    MIB.addReg(Reg, RegState::Implicit);

  // Issue CALLSEQ_END
  unsigned NumBytesForCalleeToPop =
    computeBytesPoppedByCallee(Subtarget, CC, CLI.CS);
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
    .addImm(NumBytes).addImm(NumBytesForCalleeToPop);

  // Now handle call return values.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs,
                    CLI.RetTy->getContext());
  CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);

  // Copy all of the result registers out of their specified physreg.
  unsigned ResultReg = FuncInfo.CreateRegs(CLI.RetTy);
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    EVT CopyVT = VA.getValVT();
    unsigned CopyReg = ResultReg + i;

    // If this is x86-64, and we disabled SSE, we can't return FP values.
    if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
        ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
      report_fatal_error("SSE register return with SSE disabled");
    }

    // If we prefer to use the value in xmm registers, copy it out as f80 and
    // use a truncate to move it from fp stack reg to xmm reg.
    if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
        isScalarFPTypeInSSEReg(VA.getValVT())) {
      CopyVT = MVT::f80;
      CopyReg = createResultReg(&X86::RFP80RegClass);
    }

    // Copy out the result.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), CopyReg).addReg(VA.getLocReg());
    InRegs.push_back(VA.getLocReg());

    // Round the f80 to the right size, which also moves it to the appropriate
    // xmm register. This is accomplished by storing the f80 value in memory
    // and then loading it back.
    if (CopyVT != VA.getValVT()) {
      EVT ResVT = VA.getValVT();
      unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
      unsigned MemSize = ResVT.getSizeInBits()/8;
      int FI = MFI.CreateStackObject(MemSize, MemSize, false);
      addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                TII.get(Opc)), FI)
        .addReg(CopyReg);
      Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
      addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                TII.get(Opc), ResultReg + i), FI);
    }
  }

  CLI.ResultReg = ResultReg;
  CLI.NumResultRegs = RVLocs.size();
  CLI.Call = MIB;

  return true;
}

bool
X86FastISel::fastSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    return X86SelectLoad(I);
  case Instruction::Store:
    return X86SelectStore(I);
  case Instruction::Ret:
    return X86SelectRet(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return X86SelectCmp(I);
  case Instruction::ZExt:
    return X86SelectZExt(I);
  case Instruction::Br:
    return X86SelectBranch(I);
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::Shl:
    return X86SelectShift(I);
  case Instruction::SDiv:
  case Instruction::UDiv:
  case Instruction::SRem:
  case Instruction::URem:
    return X86SelectDivRem(I);
  case Instruction::Select:
    return X86SelectSelect(I);
  case Instruction::Trunc:
    return X86SelectTrunc(I);
  case Instruction::FPExt:
    return X86SelectFPExt(I);
  case Instruction::FPTrunc:
    return X86SelectFPTrunc(I);
  case Instruction::SIToFP:
    return X86SelectSIToFP(I);
  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(DL, I->getType());
    if (DstVT.bitsGT(SrcVT))
      return X86SelectZExt(I);
    if (DstVT.bitsLT(SrcVT))
      return X86SelectTrunc(I);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    updateValueMap(I, Reg);
    return true;
  }
  case Instruction::BitCast: {
    // Select SSE2/AVX bitcasts between 128/256 bit vector types.
    if (!Subtarget->hasSSE2())
      return false;

    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(DL, I->getType());

    if (!SrcVT.isSimple() || !DstVT.isSimple())
      return false;

    if (!SrcVT.is128BitVector() &&
        !(Subtarget->hasAVX() && SrcVT.is256BitVector()))
      return false;

    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;

    // No instruction is needed for conversion. Reuse the register used by
    // the first operand.
    updateValueMap(I, Reg);
    return true;
  }
  }

  return false;
}

unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
  if (VT > MVT::i64)
    return 0;

  uint64_t Imm = CI->getZExtValue();
  if (Imm == 0) {
    unsigned SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
    switch (VT.SimpleTy) {
    default: llvm_unreachable("Unexpected value type");
    case MVT::i1:
    case MVT::i8:
      return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true,
                                        X86::sub_8bit);
    case MVT::i16:
      return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true,
                                        X86::sub_16bit);
    case MVT::i32:
      return SrcReg;
    case MVT::i64: {
      unsigned ResultReg = createResultReg(&X86::GR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
        .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
      return ResultReg;
    }
    }
  }

  unsigned Opc = 0;
  switch (VT.SimpleTy) {
  default: llvm_unreachable("Unexpected value type");
  case MVT::i1: VT = MVT::i8; // fall-through
  case MVT::i8: Opc = X86::MOV8ri; break;
  case MVT::i16: Opc = X86::MOV16ri; break;
  case MVT::i32: Opc = X86::MOV32ri; break;
  case MVT::i64: {
    if (isUInt<32>(Imm))
      Opc = X86::MOV32ri;
    else if (isInt<32>(Imm))
      Opc = X86::MOV64ri32;
    else
      Opc = X86::MOV64ri;
    break;
  }
  }
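  // A 64-bit immediate that fits in 32 unsigned bits is materialized with the
  // shorter MOV32ri: a write to a 32-bit register implicitly zeroes bits
  // 63:32, so a SUBREG_TO_REG is enough to use the result as an i64.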
  if (VT == MVT::i64 && Opc == X86::MOV32ri) {
    unsigned SrcReg = fastEmitInst_i(Opc, &X86::GR32RegClass, Imm);
    unsigned ResultReg = createResultReg(&X86::GR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
      .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
    return ResultReg;
  }
  return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
}

unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
  if (CFP->isNullValue())
    return fastMaterializeFloatZero(CFP);

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return 0;

  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  switch (VT.SimpleTy) {
  default: return 0;
  case MVT::f32:
    if (X86ScalarSSEf32) {
      Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
      RC = &X86::FR32RegClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC = &X86::RFP32RegClass;
    }
    break;
  case MVT::f64:
    if (X86ScalarSSEf64) {
      Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
      RC = &X86::FR64RegClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC = &X86::RFP64RegClass;
    }
    break;
  case MVT::f80:
    // No f80 support yet.
    return 0;
  }

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // Alignment of vector types. FIXME!
    Align = DL.getTypeAllocSize(CFP->getType());
  }

  // x86-32 PIC requires a PIC base register for constant pools.
  unsigned PICBase = 0;
  unsigned char OpFlag = Subtarget->classifyLocalReference(nullptr);
  if (OpFlag == X86II::MO_PIC_BASE_OFFSET)
    PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
  else if (OpFlag == X86II::MO_GOTOFF)
    PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
  else if (Subtarget->is64Bit() && TM.getCodeModel() == CodeModel::Small)
    PICBase = X86::RIP;

  // Create the load from the constant pool.
  unsigned CPI = MCP.getConstantPoolIndex(CFP, Align);
  unsigned ResultReg = createResultReg(RC);

  if (CM == CodeModel::Large) {
    unsigned AddrReg = createResultReg(&X86::GR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),
            AddrReg)
      .addConstantPoolIndex(CPI, 0, OpFlag);
    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                      TII.get(Opc), ResultReg);
    addDirectMem(MIB, AddrReg);
    MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getConstantPool(*FuncInfo.MF),
        MachineMemOperand::MOLoad, DL.getPointerSize(), Align);
    MIB->addMemOperand(*FuncInfo.MF, MMO);
    return ResultReg;
  }

  addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                   TII.get(Opc), ResultReg),
                           CPI, PICBase, OpFlag);
  return ResultReg;
}

unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
  // Can't handle alternate code models yet.
  if (TM.getCodeModel() != CodeModel::Small)
    return 0;

  // Materialize addresses with LEA/MOV instructions.
  X86AddressMode AM;
  if (X86SelectAddress(GV, AM)) {
    // If the expression is just a basereg, then we're done, otherwise we need
    // to emit an LEA.
    if (AM.BaseType == X86AddressMode::RegBase &&
        AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr)
      return AM.Base.Reg;

    unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
    if (TM.getRelocationModel() == Reloc::Static &&
        TLI.getPointerTy(DL) == MVT::i64) {
      // The displacement could be more than 32 bits away, so we need to use
      // an instruction with a 64-bit immediate.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),
              ResultReg)
        .addGlobalAddress(GV);
    } else {
      unsigned Opc =
        TLI.getPointerTy(DL) == MVT::i32
          ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
          : X86::LEA64r;
      addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                             TII.get(Opc), ResultReg), AM);
    }
    return ResultReg;
  }
  return 0;
}

unsigned X86FastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple())
    return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const auto *CI = dyn_cast<ConstantInt>(C))
    return X86MaterializeInt(CI, VT);
  else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return X86MaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return X86MaterializeGV(GV, VT);

  return 0;
}

unsigned X86FastISel::fastMaterializeAlloca(const AllocaInst *C) {
  // Fail on dynamic allocas. At this point, getRegForValue has already
  // checked its CSE maps, so if we're here trying to handle a dynamic
  // alloca, we're not going to succeed. X86SelectAddress has a
  // check for dynamic allocas, because it's called directly from
  // various places, but targetMaterializeAlloca also needs a check
  // in order to avoid recursion between getRegForValue,
  // X86SelectAddress, and targetMaterializeAlloca.
  if (!FuncInfo.StaticAllocaMap.count(C))
    return 0;
  assert(C->isStaticAlloca() && "dynamic alloca in the static alloca map?");

  X86AddressMode AM;
  if (!X86SelectAddress(C, AM))
    return 0;
  unsigned Opc =
    TLI.getPointerTy(DL) == MVT::i32
      ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
      : X86::LEA64r;
  const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL));
  unsigned ResultReg = createResultReg(RC);
  addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                         TII.get(Opc), ResultReg), AM);
  return ResultReg;
}

unsigned X86FastISel::fastMaterializeFloatZero(const ConstantFP *CF) {
  MVT VT;
  if (!isTypeLegal(CF->getType(), VT))
    return 0;

  // Get opcode and regclass for the given zero.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  switch (VT.SimpleTy) {
  default: return 0;
  case MVT::f32:
    if (X86ScalarSSEf32) {
      Opc = X86::FsFLD0SS;
      RC = &X86::FR32RegClass;
    } else {
      Opc = X86::LD_Fp032;
      RC = &X86::RFP32RegClass;
    }
    break;
  case MVT::f64:
    if (X86ScalarSSEf64) {
      Opc = X86::FsFLD0SD;
      RC = &X86::FR64RegClass;
    } else {
      Opc = X86::LD_Fp064;
      RC = &X86::RFP64RegClass;
    }
    break;
  case MVT::f80:
    // No f80 support yet.
    return 0;
  }

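  // FsFLD0SS/FsFLD0SD are pseudo-instructions; they are expanded after
  // register allocation into the usual dependency-breaking zeroing idiom
  // (an xorps/vxorps of the register with itself).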
  unsigned ResultReg = createResultReg(RC);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
  return ResultReg;
}


bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                      const LoadInst *LI) {
  const Value *Ptr = LI->getPointerOperand();
  X86AddressMode AM;
  if (!X86SelectAddress(Ptr, AM))
    return false;

  const X86InstrInfo &XII = (const X86InstrInfo &)TII;

  unsigned Size = DL.getTypeAllocSize(LI->getType());
  unsigned Alignment = LI->getAlignment();

  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = DL.getABITypeAlignment(LI->getType());

  SmallVector<MachineOperand, 8> AddrOps;
  AM.getFullAddress(AddrOps);

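  // e.g. if MI is "%vreg2 = ADD32rr %vreg0, %vreg1" and %vreg1 was produced
  // by LI, folding rewrites it to "%vreg2 = ADD32rm %vreg0, <LI's address>"
  // and the standalone load instruction is deleted.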
  MachineInstr *Result = XII.foldMemoryOperandImpl(
      *FuncInfo.MF, *MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, Alignment,
      /*AllowCommute=*/true);
  if (!Result)
    return false;

  // The index register could be in the wrong register class. Unfortunately,
  // foldMemoryOperandImpl could have commuted the instruction so it's not
  // enough to just look at OpNo + the offset to the index reg. We actually
  // need to scan the instruction to find the index reg and see if it's in
  // the correct reg class.
  unsigned OperandNo = 0;
  for (MachineInstr::mop_iterator I = Result->operands_begin(),
       E = Result->operands_end(); I != E; ++I, ++OperandNo) {
    MachineOperand &MO = *I;
    if (!MO.isReg() || MO.isDef() || MO.getReg() != AM.IndexReg)
      continue;
    // Found the index reg, now try to rewrite it.
    unsigned IndexReg = constrainOperandRegClass(Result->getDesc(),
                                                 MO.getReg(), OperandNo);
    if (IndexReg == MO.getReg())
      continue;
    MO.setReg(IndexReg);
  }

  Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
  MI->eraseFromParent();
  return true;
}


namespace llvm {
  FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo,
                                const TargetLibraryInfo *libInfo) {
    return new X86FastISel(funcInfo, libInfo);
  }
}