//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the X86-specific support for the FastISel class. Much
11// of the target-specific code is generated by tablegen in the file
12// X86GenFastISel.inc, which is #included here.
13//
14//===----------------------------------------------------------------------===//
15
16#include "X86.h"
17#include "X86CallingConv.h"
18#include "X86InstrBuilder.h"
19#include "X86InstrInfo.h"
20#include "X86MachineFunctionInfo.h"
21#include "X86RegisterInfo.h"
22#include "X86Subtarget.h"
23#include "X86TargetMachine.h"
24#include "llvm/Analysis/BranchProbabilityInfo.h"
25#include "llvm/CodeGen/Analysis.h"
26#include "llvm/CodeGen/FastISel.h"
27#include "llvm/CodeGen/FunctionLoweringInfo.h"
28#include "llvm/CodeGen/MachineConstantPool.h"
29#include "llvm/CodeGen/MachineFrameInfo.h"
30#include "llvm/CodeGen/MachineRegisterInfo.h"
31#include "llvm/IR/CallSite.h"
32#include "llvm/IR/CallingConv.h"
33#include "llvm/IR/DerivedTypes.h"
34#include "llvm/IR/GetElementPtrTypeIterator.h"
35#include "llvm/IR/GlobalAlias.h"
36#include "llvm/IR/GlobalVariable.h"
37#include "llvm/IR/Instructions.h"
38#include "llvm/IR/IntrinsicInst.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/Support/ErrorHandling.h"
41#include "llvm/Target/TargetOptions.h"
42using namespace llvm;
43
44namespace {
45
46class X86FastISel final : public FastISel {
47 /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
48 /// make the right decision when generating code for different targets.
49 const X86Subtarget *Subtarget;
50
  /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE and x87
52 /// floating point ops.
53 /// When SSE is available, use it for f32 operations.
54 /// When SSE2 is available, use it for f64 operations.
55 bool X86ScalarSSEf64;
56 bool X86ScalarSSEf32;
57
58public:
59 explicit X86FastISel(FunctionLoweringInfo &funcInfo,
60 const TargetLibraryInfo *libInfo)
    : FastISel(funcInfo, libInfo) {
62 Subtarget = &funcInfo.MF->getSubtarget<X86Subtarget>();
    X86ScalarSSEf64 = Subtarget->hasSSE2();
64 X86ScalarSSEf32 = Subtarget->hasSSE1();
65 }
66
67 bool fastSelectInstruction(const Instruction *I) override;
68
  /// \brief The specified machine instr operand is a vreg, and that
  /// vreg is being provided by the specified load instruction. If possible,
  /// try to fold the load as an operand to the instruction, returning true
  /// on success.
73 bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
74 const LoadInst *LI) override;
75
76 bool fastLowerArguments() override;
77 bool fastLowerCall(CallLoweringInfo &CLI) override;
78 bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
79
80#include "X86GenFastISel.inc"
81
82private:
83 bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT, DebugLoc DL);
84
85 bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, MachineMemOperand *MMO,
86 unsigned &ResultReg);
87
88 bool X86FastEmitStore(EVT VT, const Value *Val, const X86AddressMode &AM,
89 MachineMemOperand *MMO = nullptr, bool Aligned = false);
90 bool X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
91 const X86AddressMode &AM,
92 MachineMemOperand *MMO = nullptr, bool Aligned = false);
93
94 bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
95 unsigned &ResultReg);
96
97 bool X86SelectAddress(const Value *V, X86AddressMode &AM);
98 bool X86SelectCallAddress(const Value *V, X86AddressMode &AM);
99
100 bool X86SelectLoad(const Instruction *I);
101
102 bool X86SelectStore(const Instruction *I);
103
104 bool X86SelectRet(const Instruction *I);
105
106 bool X86SelectCmp(const Instruction *I);
107
108 bool X86SelectZExt(const Instruction *I);
109
110 bool X86SelectBranch(const Instruction *I);
111
112 bool X86SelectShift(const Instruction *I);
113
114 bool X86SelectDivRem(const Instruction *I);
115
116 bool X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I);
117
118 bool X86FastEmitSSESelect(MVT RetVT, const Instruction *I);
119
120 bool X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I);
121
122 bool X86SelectSelect(const Instruction *I);
123
124 bool X86SelectTrunc(const Instruction *I);
125
  bool X86SelectFPExtOrFPTrunc(const Instruction *I, unsigned Opc,
127 const TargetRegisterClass *RC);
128
  bool X86SelectFPExt(const Instruction *I);
130 bool X86SelectFPTrunc(const Instruction *I);
131
132 const X86InstrInfo *getInstrInfo() const {
    return Subtarget->getInstrInfo();
  }
135 const X86TargetMachine *getTargetMachine() const {
136 return static_cast<const X86TargetMachine *>(&TM);
137 }
138
139 bool handleConstantAddresses(const Value *V, X86AddressMode &AM);
140
141 unsigned X86MaterializeInt(const ConstantInt *CI, MVT VT);
142 unsigned X86MaterializeFP(const ConstantFP *CFP, MVT VT);
143 unsigned X86MaterializeGV(const GlobalValue *GV, MVT VT);
144 unsigned fastMaterializeConstant(const Constant *C) override;
145
146 unsigned fastMaterializeAlloca(const AllocaInst *C) override;
147
148 unsigned fastMaterializeFloatZero(const ConstantFP *CF) override;
149
150 /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
151 /// computed in an SSE register, not on the X87 floating point stack.
152 bool isScalarFPTypeInSSEReg(EVT VT) const {
    return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 needs SSE2
           (VT == MVT::f32 && X86ScalarSSEf32);   // f32 needs SSE1
155 }
156
157 bool isTypeLegal(Type *Ty, MVT &VT, bool AllowI1 = false);
158
159 bool IsMemcpySmall(uint64_t Len);
160
161 bool TryEmitSmallMemcpy(X86AddressMode DestAM,
162 X86AddressMode SrcAM, uint64_t Len);
163
164 bool foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
165 const Value *Cond);
166};
167
168} // end anonymous namespace.
169
170static std::pair<X86::CondCode, bool>
171getX86ConditionCode(CmpInst::Predicate Predicate) {
172 X86::CondCode CC = X86::COND_INVALID;
173 bool NeedSwap = false;
174 switch (Predicate) {
175 default: break;
176 // Floating-point Predicates
177 case CmpInst::FCMP_UEQ: CC = X86::COND_E; break;
178 case CmpInst::FCMP_OLT: NeedSwap = true; // fall-through
179 case CmpInst::FCMP_OGT: CC = X86::COND_A; break;
180 case CmpInst::FCMP_OLE: NeedSwap = true; // fall-through
181 case CmpInst::FCMP_OGE: CC = X86::COND_AE; break;
182 case CmpInst::FCMP_UGT: NeedSwap = true; // fall-through
183 case CmpInst::FCMP_ULT: CC = X86::COND_B; break;
184 case CmpInst::FCMP_UGE: NeedSwap = true; // fall-through
185 case CmpInst::FCMP_ULE: CC = X86::COND_BE; break;
186 case CmpInst::FCMP_ONE: CC = X86::COND_NE; break;
187 case CmpInst::FCMP_UNO: CC = X86::COND_P; break;
188 case CmpInst::FCMP_ORD: CC = X86::COND_NP; break;
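  // OEQ and UNE cannot be tested with a single condition code; callers emit
  // two flag checks instead (see X86SelectCmp / X86SelectBranch below).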
189 case CmpInst::FCMP_OEQ: // fall-through
190 case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break;
191
192 // Integer Predicates
193 case CmpInst::ICMP_EQ: CC = X86::COND_E; break;
194 case CmpInst::ICMP_NE: CC = X86::COND_NE; break;
195 case CmpInst::ICMP_UGT: CC = X86::COND_A; break;
196 case CmpInst::ICMP_UGE: CC = X86::COND_AE; break;
197 case CmpInst::ICMP_ULT: CC = X86::COND_B; break;
198 case CmpInst::ICMP_ULE: CC = X86::COND_BE; break;
199 case CmpInst::ICMP_SGT: CC = X86::COND_G; break;
200 case CmpInst::ICMP_SGE: CC = X86::COND_GE; break;
201 case CmpInst::ICMP_SLT: CC = X86::COND_L; break;
202 case CmpInst::ICMP_SLE: CC = X86::COND_LE; break;
203 }
204
205 return std::make_pair(CC, NeedSwap);
206}
207
208static std::pair<unsigned, bool>
209getX86SSEConditionCode(CmpInst::Predicate Predicate) {
210 unsigned CC;
211 bool NeedSwap = false;
212
213 // SSE Condition code mapping:
214 // 0 - EQ
215 // 1 - LT
216 // 2 - LE
217 // 3 - UNORD
218 // 4 - NEQ
219 // 5 - NLT
220 // 6 - NLE
221 // 7 - ORD
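  // 8 - not a real cmpss/cmpsd immediate; marks FCMP_UEQ/FCMP_ONE, which a
  //     single SSE compare cannot express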
222 switch (Predicate) {
223 default: llvm_unreachable("Unexpected predicate");
224 case CmpInst::FCMP_OEQ: CC = 0; break;
225 case CmpInst::FCMP_OGT: NeedSwap = true; // fall-through
226 case CmpInst::FCMP_OLT: CC = 1; break;
227 case CmpInst::FCMP_OGE: NeedSwap = true; // fall-through
228 case CmpInst::FCMP_OLE: CC = 2; break;
229 case CmpInst::FCMP_UNO: CC = 3; break;
230 case CmpInst::FCMP_UNE: CC = 4; break;
231 case CmpInst::FCMP_ULE: NeedSwap = true; // fall-through
232 case CmpInst::FCMP_UGE: CC = 5; break;
233 case CmpInst::FCMP_ULT: NeedSwap = true; // fall-through
234 case CmpInst::FCMP_UGT: CC = 6; break;
235 case CmpInst::FCMP_ORD: CC = 7; break;
236 case CmpInst::FCMP_UEQ:
237 case CmpInst::FCMP_ONE: CC = 8; break;
238 }
239
240 return std::make_pair(CC, NeedSwap);
241}
242
243/// \brief Check if it is possible to fold the condition from the XALU intrinsic
244/// into the user. The condition code will only be updated on success.
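/// For example (illustrative IR):
///   %res  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
///   %obit = extractvalue { i32, i1 } %res, 1
///   br i1 %obit, label %overflow, label %cont
/// lets the branch test the CPU overflow flag (COND_O) directly.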
245bool X86FastISel::foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
246 const Value *Cond) {
247 if (!isa<ExtractValueInst>(Cond))
248 return false;
249
250 const auto *EV = cast<ExtractValueInst>(Cond);
251 if (!isa<IntrinsicInst>(EV->getAggregateOperand()))
252 return false;
253
254 const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand());
255 MVT RetVT;
256 const Function *Callee = II->getCalledFunction();
257 Type *RetTy =
258 cast<StructType>(Callee->getReturnType())->getTypeAtIndex(0U);
259 if (!isTypeLegal(RetTy, RetVT))
260 return false;
261
262 if (RetVT != MVT::i32 && RetVT != MVT::i64)
263 return false;
264
265 X86::CondCode TmpCC;
266 switch (II->getIntrinsicID()) {
267 default: return false;
268 case Intrinsic::sadd_with_overflow:
269 case Intrinsic::ssub_with_overflow:
270 case Intrinsic::smul_with_overflow:
271 case Intrinsic::umul_with_overflow: TmpCC = X86::COND_O; break;
272 case Intrinsic::uadd_with_overflow:
273 case Intrinsic::usub_with_overflow: TmpCC = X86::COND_B; break;
274 }
275
276 // Check if both instructions are in the same basic block.
277 if (II->getParent() != I->getParent())
278 return false;
279
280 // Make sure nothing is in the way
281 BasicBlock::const_iterator Start = I;
282 BasicBlock::const_iterator End = II;
283 for (auto Itr = std::prev(Start); Itr != End; --Itr) {
284 // We only expect extractvalue instructions between the intrinsic and the
285 // instruction to be selected.
286 if (!isa<ExtractValueInst>(Itr))
287 return false;
288
289 // Check that the extractvalue operand comes from the intrinsic.
290 const auto *EVI = cast<ExtractValueInst>(Itr);
291 if (EVI->getAggregateOperand() != II)
292 return false;
293 }
294
295 CC = TmpCC;
296 return true;
297}
298
299bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
300 EVT evt = TLI.getValueType(Ty, /*HandleUnknown=*/true);
301 if (evt == MVT::Other || !evt.isSimple())
302 // Unhandled type. Halt "fast" selection and bail.
303 return false;
304
305 VT = evt.getSimpleVT();
306 // For now, require SSE/SSE2 for performing floating-point operations,
307 // since x87 requires additional work.
308 if (VT == MVT::f64 && !X86ScalarSSEf64)
309 return false;
310 if (VT == MVT::f32 && !X86ScalarSSEf32)
311 return false;
312 // Similarly, no f80 support yet.
313 if (VT == MVT::f80)
314 return false;
315 // We only handle legal types. For example, on x86-32 the instruction
316 // selector contains all of the 64-bit instructions from x86-64,
317 // under the assumption that i64 won't be used if the target doesn't
318 // support it.
319 return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
320}
321
322#include "X86GenCallingConv.inc"
323
324/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
325/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
326/// Return true and the result register by reference if it is possible.
327bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
328 MachineMemOperand *MMO, unsigned &ResultReg) {
329 // Get opcode and regclass of the output for the given load instruction.
330 unsigned Opc = 0;
331 const TargetRegisterClass *RC = nullptr;
332 switch (VT.getSimpleVT().SimpleTy) {
333 default: return false;
334 case MVT::i1:
335 case MVT::i8:
336 Opc = X86::MOV8rm;
337 RC = &X86::GR8RegClass;
338 break;
339 case MVT::i16:
340 Opc = X86::MOV16rm;
341 RC = &X86::GR16RegClass;
342 break;
343 case MVT::i32:
344 Opc = X86::MOV32rm;
345 RC = &X86::GR32RegClass;
346 break;
347 case MVT::i64:
348 // Must be in x86-64 mode.
349 Opc = X86::MOV64rm;
350 RC = &X86::GR64RegClass;
351 break;
352 case MVT::f32:
353 if (X86ScalarSSEf32) {
354 Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
355 RC = &X86::FR32RegClass;
356 } else {
357 Opc = X86::LD_Fp32m;
358 RC = &X86::RFP32RegClass;
359 }
360 break;
361 case MVT::f64:
362 if (X86ScalarSSEf64) {
363 Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
364 RC = &X86::FR64RegClass;
365 } else {
366 Opc = X86::LD_Fp64m;
367 RC = &X86::RFP64RegClass;
368 }
369 break;
370 case MVT::f80:
371 // No f80 support yet.
372 return false;
373 }
374
375 ResultReg = createResultReg(RC);
376 MachineInstrBuilder MIB =
377 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
378 addFullAddress(MIB, AM);
379 if (MMO)
380 MIB->addMemOperand(*FuncInfo.MF, MMO);
381 return true;
382}
383
384/// X86FastEmitStore - Emit a machine instruction to store a value Val of
/// type VT. The address is either pre-computed, consisting of a base ptr, Ptr,
/// and a displacement offset, or a GlobalAddress, i.e. V.
/// Return true if it is possible.
388bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
389 const X86AddressMode &AM,
390 MachineMemOperand *MMO, bool Aligned) {
391 // Get opcode and regclass of the output for the given store instruction.
392 unsigned Opc = 0;
393 switch (VT.getSimpleVT().SimpleTy) {
394 case MVT::f80: // No f80 support yet.
395 default: return false;
396 case MVT::i1: {
397 // Mask out all but lowest bit.
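    // (An i1 lives in a GR8 whose upper bits are not guaranteed to be zero,
    //  so clear them before storing the byte.)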
398 unsigned AndResult = createResultReg(&X86::GR8RegClass);
399 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
400 TII.get(X86::AND8ri), AndResult)
401 .addReg(ValReg, getKillRegState(ValIsKill)).addImm(1);
402 ValReg = AndResult;
403 }
404 // FALLTHROUGH, handling i1 as i8.
405 case MVT::i8: Opc = X86::MOV8mr; break;
406 case MVT::i16: Opc = X86::MOV16mr; break;
407 case MVT::i32: Opc = X86::MOV32mr; break;
408 case MVT::i64: Opc = X86::MOV64mr; break; // Must be in x86-64 mode.
409 case MVT::f32:
410 Opc = X86ScalarSSEf32 ?
411 (Subtarget->hasAVX() ? X86::VMOVSSmr : X86::MOVSSmr) : X86::ST_Fp32m;
412 break;
413 case MVT::f64:
414 Opc = X86ScalarSSEf64 ?
415 (Subtarget->hasAVX() ? X86::VMOVSDmr : X86::MOVSDmr) : X86::ST_Fp64m;
416 break;
417 case MVT::v4f32:
418 if (Aligned)
419 Opc = Subtarget->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
420 else
421 Opc = Subtarget->hasAVX() ? X86::VMOVUPSmr : X86::MOVUPSmr;
422 break;
423 case MVT::v2f64:
424 if (Aligned)
425 Opc = Subtarget->hasAVX() ? X86::VMOVAPDmr : X86::MOVAPDmr;
426 else
427 Opc = Subtarget->hasAVX() ? X86::VMOVUPDmr : X86::MOVUPDmr;
428 break;
429 case MVT::v4i32:
430 case MVT::v2i64:
431 case MVT::v8i16:
432 case MVT::v16i8:
433 if (Aligned)
434 Opc = Subtarget->hasAVX() ? X86::VMOVDQAmr : X86::MOVDQAmr;
435 else
436 Opc = Subtarget->hasAVX() ? X86::VMOVDQUmr : X86::MOVDQUmr;
437 break;
438 }
439
440 MachineInstrBuilder MIB =
441 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
442 addFullAddress(MIB, AM).addReg(ValReg, getKillRegState(ValIsKill));
443 if (MMO)
444 MIB->addMemOperand(*FuncInfo.MF, MMO);
445
446 return true;
447}
448
449bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
450 const X86AddressMode &AM,
451 MachineMemOperand *MMO, bool Aligned) {
452 // Handle 'null' like i32/i64 0.
453 if (isa<ConstantPointerNull>(Val))
454 Val = Constant::getNullValue(DL.getIntPtrType(Val->getContext()));
455
456 // If this is a store of a simple constant, fold the constant into the store.
457 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
458 unsigned Opc = 0;
459 bool Signed = true;
460 switch (VT.getSimpleVT().SimpleTy) {
461 default: break;
462 case MVT::i1: Signed = false; // FALLTHROUGH to handle as i8.
463 case MVT::i8: Opc = X86::MOV8mi; break;
464 case MVT::i16: Opc = X86::MOV16mi; break;
465 case MVT::i32: Opc = X86::MOV32mi; break;
466 case MVT::i64:
467 // Must be a 32-bit sign extended value.
468 if (isInt<32>(CI->getSExtValue()))
469 Opc = X86::MOV64mi32;
470 break;
471 }
472
473 if (Opc) {
474 MachineInstrBuilder MIB =
475 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
476 addFullAddress(MIB, AM).addImm(Signed ? (uint64_t) CI->getSExtValue()
477 : CI->getZExtValue());
478 if (MMO)
479 MIB->addMemOperand(*FuncInfo.MF, MMO);
480 return true;
481 }
482 }
483
484 unsigned ValReg = getRegForValue(Val);
485 if (ValReg == 0)
486 return false;
487
488 bool ValKill = hasTrivialKill(Val);
489 return X86FastEmitStore(VT, ValReg, ValKill, AM, MMO, Aligned);
490}
491
492/// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
493/// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
494/// ISD::SIGN_EXTEND).
495bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
496 unsigned Src, EVT SrcVT,
497 unsigned &ResultReg) {
498 unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
499 Src, /*TODO: Kill=*/false);
500 if (RR == 0)
501 return false;
502
503 ResultReg = RR;
504 return true;
505}
506
507bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
508 // Handle constant address.
509 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
510 // Can't handle alternate code models yet.
511 if (TM.getCodeModel() != CodeModel::Small)
512 return false;
513
514 // Can't handle TLS yet.
515 if (GV->isThreadLocal())
516 return false;
517
518 // RIP-relative addresses can't have additional register operands, so if
519 // we've already folded stuff into the addressing mode, just force the
520 // global value into its own register, which we can use as the basereg.
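    // (x86-64 RIP-relative addressing only encodes [RIP + disp32]; it cannot
    //  be combined with a base or index register.)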
521 if (!Subtarget->isPICStyleRIPRel() ||
522 (AM.Base.Reg == 0 && AM.IndexReg == 0)) {
523 // Okay, we've committed to selecting this global. Set up the address.
524 AM.GV = GV;
525
526 // Allow the subtarget to classify the global.
527 unsigned char GVFlags = Subtarget->ClassifyGlobalReference(GV, TM);
528
529 // If this reference is relative to the pic base, set it now.
530 if (isGlobalRelativeToPICBase(GVFlags)) {
531 // FIXME: How do we know Base.Reg is free??
532 AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
533 }
534
535 // Unless the ABI requires an extra load, return a direct reference to
536 // the global.
537 if (!isGlobalStubReference(GVFlags)) {
538 if (Subtarget->isPICStyleRIPRel()) {
539 // Use rip-relative addressing if we can. Above we verified that the
540 // base and index registers are unused.
541 assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
542 AM.Base.Reg = X86::RIP;
543 }
544 AM.GVOpFlags = GVFlags;
545 return true;
546 }
547
548 // Ok, we need to do a load from a stub. If we've already loaded from
549 // this stub, reuse the loaded pointer, otherwise emit the load now.
550 DenseMap<const Value *, unsigned>::iterator I = LocalValueMap.find(V);
551 unsigned LoadReg;
552 if (I != LocalValueMap.end() && I->second != 0) {
553 LoadReg = I->second;
554 } else {
555 // Issue load from stub.
556 unsigned Opc = 0;
557 const TargetRegisterClass *RC = nullptr;
558 X86AddressMode StubAM;
559 StubAM.Base.Reg = AM.Base.Reg;
560 StubAM.GV = GV;
561 StubAM.GVOpFlags = GVFlags;
562
563 // Prepare for inserting code in the local-value area.
564 SavePoint SaveInsertPt = enterLocalValueArea();
565
566 if (TLI.getPointerTy() == MVT::i64) {
567 Opc = X86::MOV64rm;
568 RC = &X86::GR64RegClass;
569
570 if (Subtarget->isPICStyleRIPRel())
571 StubAM.Base.Reg = X86::RIP;
572 } else {
573 Opc = X86::MOV32rm;
574 RC = &X86::GR32RegClass;
575 }
576
577 LoadReg = createResultReg(RC);
578 MachineInstrBuilder LoadMI =
579 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), LoadReg);
580 addFullAddress(LoadMI, StubAM);
581
582 // Ok, back to normal mode.
583 leaveLocalValueArea(SaveInsertPt);
584
585 // Prevent loading GV stub multiple times in same MBB.
586 LocalValueMap[V] = LoadReg;
587 }
588
589 // Now construct the final address. Note that the Disp, Scale,
590 // and Index values may already be set here.
591 AM.Base.Reg = LoadReg;
592 AM.GV = nullptr;
593 return true;
594 }
595 }
596
597 // If all else fails, try to materialize the value in a register.
598 if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
599 if (AM.Base.Reg == 0) {
600 AM.Base.Reg = getRegForValue(V);
601 return AM.Base.Reg != 0;
602 }
603 if (AM.IndexReg == 0) {
604 assert(AM.Scale == 1 && "Scale with no index!");
605 AM.IndexReg = getRegForValue(V);
606 return AM.IndexReg != 0;
607 }
608 }
609
610 return false;
611}
612
613/// X86SelectAddress - Attempt to fill in an address from the given value.
614///
615bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
616 SmallVector<const Value *, 32> GEPs;
617redo_gep:
618 const User *U = nullptr;
619 unsigned Opcode = Instruction::UserOp1;
620 if (const Instruction *I = dyn_cast<Instruction>(V)) {
621 // Don't walk into other basic blocks; it's possible we haven't
622 // visited them yet, so the instructions may not yet be assigned
623 // virtual registers.
624 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(V)) ||
625 FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
626 Opcode = I->getOpcode();
627 U = I;
628 }
629 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
630 Opcode = C->getOpcode();
631 U = C;
632 }
633
634 if (PointerType *Ty = dyn_cast<PointerType>(V->getType()))
635 if (Ty->getAddressSpace() > 255)
636 // Fast instruction selection doesn't support the special
637 // address spaces.
638 return false;
639
640 switch (Opcode) {
641 default: break;
642 case Instruction::BitCast:
643 // Look past bitcasts.
644 return X86SelectAddress(U->getOperand(0), AM);
645
646 case Instruction::IntToPtr:
647 // Look past no-op inttoptrs.
648 if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
649 return X86SelectAddress(U->getOperand(0), AM);
650 break;
651
652 case Instruction::PtrToInt:
653 // Look past no-op ptrtoints.
654 if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
655 return X86SelectAddress(U->getOperand(0), AM);
656 break;
657
658 case Instruction::Alloca: {
659 // Do static allocas.
660 const AllocaInst *A = cast<AllocaInst>(V);
661 DenseMap<const AllocaInst *, int>::iterator SI =
662 FuncInfo.StaticAllocaMap.find(A);
663 if (SI != FuncInfo.StaticAllocaMap.end()) {
664 AM.BaseType = X86AddressMode::FrameIndexBase;
665 AM.Base.FrameIndex = SI->second;
666 return true;
667 }
668 break;
669 }
670
671 case Instruction::Add: {
672 // Adds of constants are common and easy enough.
673 if (const ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
674 uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
675 // They have to fit in the 32-bit signed displacement field though.
676 if (isInt<32>(Disp)) {
677 AM.Disp = (uint32_t)Disp;
678 return X86SelectAddress(U->getOperand(0), AM);
679 }
680 }
681 break;
682 }
683
684 case Instruction::GetElementPtr: {
685 X86AddressMode SavedAM = AM;
686
687 // Pattern-match simple GEPs.
688 uint64_t Disp = (int32_t)AM.Disp;
689 unsigned IndexReg = AM.IndexReg;
690 unsigned Scale = AM.Scale;
691 gep_type_iterator GTI = gep_type_begin(U);
692 // Iterate through the indices, folding what we can. Constants can be
693 // folded, and one dynamic index can be handled, if the scale is supported.
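    // The aim is to express the GEP as [Base + IndexReg*Scale + Disp]; e.g. a
    // GEP over i32 elements with one dynamic index i becomes [Base + i*4].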
694 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
695 i != e; ++i, ++GTI) {
696 const Value *Op = *i;
697 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
698 const StructLayout *SL = DL.getStructLayout(STy);
699 Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
700 continue;
701 }
702
      // An array/variable index is always of the form i*S where S is the
704 // constant scale size. See if we can push the scale into immediates.
705 uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
706 for (;;) {
707 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
708 // Constant-offset addressing.
709 Disp += CI->getSExtValue() * S;
710 break;
711 }
712 if (canFoldAddIntoGEP(U, Op)) {
713 // A compatible add with a constant operand. Fold the constant.
714 ConstantInt *CI =
715 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
716 Disp += CI->getSExtValue() * S;
717 // Iterate on the other operand.
718 Op = cast<AddOperator>(Op)->getOperand(0);
719 continue;
720 }
721 if (IndexReg == 0 &&
722 (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
723 (S == 1 || S == 2 || S == 4 || S == 8)) {
724 // Scaled-index addressing.
725 Scale = S;
726 IndexReg = getRegForGEPIndex(Op).first;
727 if (IndexReg == 0)
728 return false;
729 break;
730 }
731 // Unsupported.
732 goto unsupported_gep;
733 }
734 }
735
736 // Check for displacement overflow.
737 if (!isInt<32>(Disp))
738 break;
739
740 AM.IndexReg = IndexReg;
741 AM.Scale = Scale;
742 AM.Disp = (uint32_t)Disp;
743 GEPs.push_back(V);
744
745 if (const GetElementPtrInst *GEP =
746 dyn_cast<GetElementPtrInst>(U->getOperand(0))) {
747 // Ok, the GEP indices were covered by constant-offset and scaled-index
748 // addressing. Update the address state and move on to examining the base.
749 V = GEP;
750 goto redo_gep;
751 } else if (X86SelectAddress(U->getOperand(0), AM)) {
752 return true;
753 }
754
755 // If we couldn't merge the gep value into this addr mode, revert back to
756 // our address and just match the value instead of completely failing.
757 AM = SavedAM;
758
759 for (SmallVectorImpl<const Value *>::reverse_iterator
760 I = GEPs.rbegin(), E = GEPs.rend(); I != E; ++I)
761 if (handleConstantAddresses(*I, AM))
762 return true;
763
764 return false;
765 unsupported_gep:
766 // Ok, the GEP indices weren't all covered.
767 break;
768 }
769 }
770
771 return handleConstantAddresses(V, AM);
772}
773
774/// X86SelectCallAddress - Attempt to fill in an address from the given value.
775///
776bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
777 const User *U = nullptr;
778 unsigned Opcode = Instruction::UserOp1;
779 const Instruction *I = dyn_cast<Instruction>(V);
780 // Record if the value is defined in the same basic block.
781 //
782 // This information is crucial to know whether or not folding an
783 // operand is valid.
784 // Indeed, FastISel generates or reuses a virtual register for all
785 // operands of all instructions it selects. Obviously, the definition and
786 // its uses must use the same virtual register otherwise the produced
787 // code is incorrect.
788 // Before instruction selection, FunctionLoweringInfo::set sets the virtual
789 // registers for values that are alive across basic blocks. This ensures
  // that the values are set consistently across basic blocks, even
791 // if different instruction selection mechanisms are used (e.g., a mix of
792 // SDISel and FastISel).
793 // For values local to a basic block, the instruction selection process
794 // generates these virtual registers with whatever method is appropriate
795 // for its needs. In particular, FastISel and SDISel do not share the way
796 // local virtual registers are set.
  // Therefore, it is impossible (or at least unsafe) to share values
  // between basic blocks unless they use the same instruction selection
  // method, which is not guaranteed for X86.
  // Moreover, things like hasOneUse cannot be used accurately if we
  // allow references to values across basic blocks when they are not
  // alive across basic blocks initially.
803 bool InMBB = true;
804 if (I) {
805 Opcode = I->getOpcode();
806 U = I;
807 InMBB = I->getParent() == FuncInfo.MBB->getBasicBlock();
808 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
809 Opcode = C->getOpcode();
810 U = C;
811 }
812
813 switch (Opcode) {
814 default: break;
815 case Instruction::BitCast:
816 // Look past bitcasts if its operand is in the same BB.
817 if (InMBB)
818 return X86SelectCallAddress(U->getOperand(0), AM);
819 break;
820
821 case Instruction::IntToPtr:
822 // Look past no-op inttoptrs if its operand is in the same BB.
823 if (InMBB &&
824 TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
825 return X86SelectCallAddress(U->getOperand(0), AM);
826 break;
827
828 case Instruction::PtrToInt:
829 // Look past no-op ptrtoints if its operand is in the same BB.
830 if (InMBB &&
831 TLI.getValueType(U->getType()) == TLI.getPointerTy())
832 return X86SelectCallAddress(U->getOperand(0), AM);
833 break;
834 }
835
836 // Handle constant address.
837 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
838 // Can't handle alternate code models yet.
839 if (TM.getCodeModel() != CodeModel::Small)
840 return false;
841
842 // RIP-relative addresses can't have additional register operands.
843 if (Subtarget->isPICStyleRIPRel() &&
844 (AM.Base.Reg != 0 || AM.IndexReg != 0))
845 return false;
846
847 // Can't handle DLL Import.
848 if (GV->hasDLLImportStorageClass())
849 return false;
850
851 // Can't handle TLS.
852 if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
853 if (GVar->isThreadLocal())
854 return false;
855
856 // Okay, we've committed to selecting this global. Set up the basic address.
857 AM.GV = GV;
858
859 // No ABI requires an extra load for anything other than DLLImport, which
860 // we rejected above. Return a direct reference to the global.
861 if (Subtarget->isPICStyleRIPRel()) {
862 // Use rip-relative addressing if we can. Above we verified that the
863 // base and index registers are unused.
864 assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
865 AM.Base.Reg = X86::RIP;
866 } else if (Subtarget->isPICStyleStubPIC()) {
867 AM.GVOpFlags = X86II::MO_PIC_BASE_OFFSET;
868 } else if (Subtarget->isPICStyleGOT()) {
869 AM.GVOpFlags = X86II::MO_GOTOFF;
870 }
871
872 return true;
873 }
874
875 // If all else fails, try to materialize the value in a register.
876 if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
877 if (AM.Base.Reg == 0) {
878 AM.Base.Reg = getRegForValue(V);
879 return AM.Base.Reg != 0;
880 }
881 if (AM.IndexReg == 0) {
882 assert(AM.Scale == 1 && "Scale with no index!");
883 AM.IndexReg = getRegForValue(V);
884 return AM.IndexReg != 0;
885 }
886 }
887
888 return false;
889}
890
891
892/// X86SelectStore - Select and emit code to implement store instructions.
893bool X86FastISel::X86SelectStore(const Instruction *I) {
894 // Atomic stores need special handling.
895 const StoreInst *S = cast<StoreInst>(I);
896
897 if (S->isAtomic())
898 return false;
899
900 const Value *Val = S->getValueOperand();
901 const Value *Ptr = S->getPointerOperand();
902
903 MVT VT;
904 if (!isTypeLegal(Val->getType(), VT, /*AllowI1=*/true))
905 return false;
906
907 unsigned Alignment = S->getAlignment();
908 unsigned ABIAlignment = DL.getABITypeAlignment(Val->getType());
909 if (Alignment == 0) // Ensure that codegen never sees alignment 0
910 Alignment = ABIAlignment;
911 bool Aligned = Alignment >= ABIAlignment;
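  // X86FastEmitStore uses Aligned to choose aligned vector stores
  // (MOVAPS/MOVDQA) over their unaligned counterparts (MOVUPS/MOVDQU).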
912
913 X86AddressMode AM;
914 if (!X86SelectAddress(Ptr, AM))
915 return false;
916
917 return X86FastEmitStore(VT, Val, AM, createMachineMemOperandFor(I), Aligned);
918}
919
920/// X86SelectRet - Select and emit code to implement ret instructions.
921bool X86FastISel::X86SelectRet(const Instruction *I) {
922 const ReturnInst *Ret = cast<ReturnInst>(I);
923 const Function &F = *I->getParent()->getParent();
924 const X86MachineFunctionInfo *X86MFInfo =
925 FuncInfo.MF->getInfo<X86MachineFunctionInfo>();
926
927 if (!FuncInfo.CanLowerReturn)
928 return false;
929
930 CallingConv::ID CC = F.getCallingConv();
931 if (CC != CallingConv::C &&
932 CC != CallingConv::Fast &&
933 CC != CallingConv::X86_FastCall &&
934 CC != CallingConv::X86_64_SysV)
935 return false;
936
937 if (Subtarget->isCallingConvWin64(CC))
938 return false;
939
940 // Don't handle popping bytes on return for now.
941 if (X86MFInfo->getBytesToPopOnReturn() != 0)
942 return false;
943
944 // fastcc with -tailcallopt is intended to provide a guaranteed
945 // tail call optimization. Fastisel doesn't know how to do that.
946 if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)
947 return false;
948
949 // Let SDISel handle vararg functions.
950 if (F.isVarArg())
951 return false;
952
953 // Build a list of return value registers.
954 SmallVector<unsigned, 4> RetRegs;
955
956 if (Ret->getNumOperands() > 0) {
957 SmallVector<ISD::OutputArg, 4> Outs;
958 GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
959
960 // Analyze operands of the call, assigning locations to each operand.
961 SmallVector<CCValAssign, 16> ValLocs;
962 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
963 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
964
965 const Value *RV = Ret->getOperand(0);
966 unsigned Reg = getRegForValue(RV);
967 if (Reg == 0)
968 return false;
969
970 // Only handle a single return value for now.
971 if (ValLocs.size() != 1)
972 return false;
973
974 CCValAssign &VA = ValLocs[0];
975
976 // Don't bother handling odd stuff for now.
977 if (VA.getLocInfo() != CCValAssign::Full)
978 return false;
979 // Only handle register returns for now.
980 if (!VA.isRegLoc())
981 return false;
982
983 // The calling-convention tables for x87 returns don't tell
984 // the whole story.
985 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
986 return false;
987
988 unsigned SrcReg = Reg + VA.getValNo();
989 EVT SrcVT = TLI.getValueType(RV->getType());
990 EVT DstVT = VA.getValVT();
991 // Special handling for extended integers.
992 if (SrcVT != DstVT) {
993 if (SrcVT != MVT::i1 && SrcVT != MVT::i8 && SrcVT != MVT::i16)
994 return false;
995
996 if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
997 return false;
998
999 assert(DstVT == MVT::i32 && "X86 should always ext to i32");
1000
1001 if (SrcVT == MVT::i1) {
1002 if (Outs[0].Flags.isSExt())
1003 return false;
1004 SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg, /*TODO: Kill=*/false);
1005 SrcVT = MVT::i8;
1006 }
1007 unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
1008 ISD::SIGN_EXTEND;
1009 SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op,
1010 SrcReg, /*TODO: Kill=*/false);
1011 }
1012
1013 // Make the copy.
1014 unsigned DstReg = VA.getLocReg();
1015 const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
1016 // Avoid a cross-class copy. This is very unlikely.
1017 if (!SrcRC->contains(DstReg))
1018 return false;
1019 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1020 TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);
1021
1022 // Add register to return instruction.
1023 RetRegs.push_back(VA.getLocReg());
1024 }
1025
1026 // The x86-64 ABI for returning structs by value requires that we copy
1027 // the sret argument into %rax for the return. We saved the argument into
1028 // a virtual register in the entry block, so now we copy the value out
1029 // and into %rax. We also do the same with %eax for Win32.
1030 if (F.hasStructRetAttr() &&
1031 (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC())) {
1032 unsigned Reg = X86MFInfo->getSRetReturnReg();
1033 assert(Reg &&
1034 "SRetReturnReg should have been set in LowerFormalArguments()!");
1035 unsigned RetReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
1036 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1037 TII.get(TargetOpcode::COPY), RetReg).addReg(Reg);
1038 RetRegs.push_back(RetReg);
1039 }
1040
1041 // Now emit the RET.
1042 MachineInstrBuilder MIB =
1043 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1044 TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
1045 for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
1046 MIB.addReg(RetRegs[i], RegState::Implicit);
1047 return true;
1048}
1049
1050/// X86SelectLoad - Select and emit code to implement load instructions.
1051///
1052bool X86FastISel::X86SelectLoad(const Instruction *I) {
1053 const LoadInst *LI = cast<LoadInst>(I);
1054
1055 // Atomic loads need special handling.
1056 if (LI->isAtomic())
1057 return false;
1058
1059 MVT VT;
1060 if (!isTypeLegal(LI->getType(), VT, /*AllowI1=*/true))
1061 return false;
1062
1063 const Value *Ptr = LI->getPointerOperand();
1064
1065 X86AddressMode AM;
1066 if (!X86SelectAddress(Ptr, AM))
1067 return false;
1068
1069 unsigned ResultReg = 0;
1070 if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg))
1071 return false;
1072
1073 updateValueMap(I, ResultReg);
1074 return true;
1075}
1076
1077static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
1078 bool HasAVX = Subtarget->hasAVX();
1079 bool X86ScalarSSEf32 = Subtarget->hasSSE1();
1080 bool X86ScalarSSEf64 = Subtarget->hasSSE2();
1081
1082 switch (VT.getSimpleVT().SimpleTy) {
1083 default: return 0;
1084 case MVT::i8: return X86::CMP8rr;
1085 case MVT::i16: return X86::CMP16rr;
1086 case MVT::i32: return X86::CMP32rr;
1087 case MVT::i64: return X86::CMP64rr;
1088 case MVT::f32:
1089 return X86ScalarSSEf32 ? (HasAVX ? X86::VUCOMISSrr : X86::UCOMISSrr) : 0;
1090 case MVT::f64:
1091 return X86ScalarSSEf64 ? (HasAVX ? X86::VUCOMISDrr : X86::UCOMISDrr) : 0;
1092 }
1093}
1094
/// X86ChooseCmpImmediateOpcode - If we have a comparison with a constant RHS,
/// return an opcode that works for the compare (e.g. CMP32ri); otherwise
/// return 0.
1098static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
1099 switch (VT.getSimpleVT().SimpleTy) {
1100 // Otherwise, we can't fold the immediate into this comparison.
1101 default: return 0;
1102 case MVT::i8: return X86::CMP8ri;
1103 case MVT::i16: return X86::CMP16ri;
1104 case MVT::i32: return X86::CMP32ri;
1105 case MVT::i64:
1106 // 64-bit comparisons are only valid if the immediate fits in a 32-bit sext
1107 // field.
1108 if ((int)RHSC->getSExtValue() == RHSC->getSExtValue())
1109 return X86::CMP64ri32;
1110 return 0;
1111 }
1112}
1113
1114bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
1115 EVT VT, DebugLoc CurDbgLoc) {
1116 unsigned Op0Reg = getRegForValue(Op0);
1117 if (Op0Reg == 0) return false;
1118
1119 // Handle 'null' like i32/i64 0.
1120 if (isa<ConstantPointerNull>(Op1))
1121 Op1 = Constant::getNullValue(DL.getIntPtrType(Op0->getContext()));
1122
1123 // We have two options: compare with register or immediate. If the RHS of
1124 // the compare is an immediate that we can fold into this compare, use
1125 // CMPri, otherwise use CMPrr.
1126 if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
1127 if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
1128 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareImmOpc))
1129 .addReg(Op0Reg)
1130 .addImm(Op1C->getSExtValue());
1131 return true;
1132 }
1133 }
1134
1135 unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget);
1136 if (CompareOpc == 0) return false;
1137
1138 unsigned Op1Reg = getRegForValue(Op1);
1139 if (Op1Reg == 0) return false;
1140 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareOpc))
1141 .addReg(Op0Reg)
1142 .addReg(Op1Reg);
1143
1144 return true;
1145}
1146
1147bool X86FastISel::X86SelectCmp(const Instruction *I) {
1148 const CmpInst *CI = cast<CmpInst>(I);
1149
1150 MVT VT;
1151 if (!isTypeLegal(I->getOperand(0)->getType(), VT))
1152 return false;
1153
1154 // Try to optimize or fold the cmp.
1155 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
1156 unsigned ResultReg = 0;
1157 switch (Predicate) {
1158 default: break;
1159 case CmpInst::FCMP_FALSE: {
1160 ResultReg = createResultReg(&X86::GR32RegClass);
1161 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0),
1162 ResultReg);
1163 ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true,
1164 X86::sub_8bit);
1165 if (!ResultReg)
1166 return false;
1167 break;
1168 }
1169 case CmpInst::FCMP_TRUE: {
1170 ResultReg = createResultReg(&X86::GR8RegClass);
1171 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),
1172 ResultReg).addImm(1);
1173 break;
1174 }
1175 }
1176
1177 if (ResultReg) {
1178 updateValueMap(I, ResultReg);
1179 return true;
1180 }
1181
1182 const Value *LHS = CI->getOperand(0);
1183 const Value *RHS = CI->getOperand(1);
1184
1185 // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
1186 // We don't have to materialize a zero constant for this case and can just use
1187 // %x again on the RHS.
1188 if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
1189 const auto *RHSC = dyn_cast<ConstantFP>(RHS);
1190 if (RHSC && RHSC->isNullValue())
1191 RHS = LHS;
1192 }
1193
1194 // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
1195 static unsigned SETFOpcTable[2][3] = {
1196 { X86::SETEr, X86::SETNPr, X86::AND8rr },
1197 { X86::SETNEr, X86::SETPr, X86::OR8rr }
1198 };
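  // With UCOMISS/UCOMISD, "equal" sets ZF and "unordered" sets PF, so
  // OEQ = SETE AND SETNP, while UNE = SETNE OR SETP.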
1199 unsigned *SETFOpc = nullptr;
1200 switch (Predicate) {
1201 default: break;
1202 case CmpInst::FCMP_OEQ: SETFOpc = &SETFOpcTable[0][0]; break;
1203 case CmpInst::FCMP_UNE: SETFOpc = &SETFOpcTable[1][0]; break;
1204 }
1205
1206 ResultReg = createResultReg(&X86::GR8RegClass);
1207 if (SETFOpc) {
1208 if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
1209 return false;
1210
1211 unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
1212 unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);
1213 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[0]),
1214 FlagReg1);
1215 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[1]),
1216 FlagReg2);
1217 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[2]),
1218 ResultReg).addReg(FlagReg1).addReg(FlagReg2);
1219 updateValueMap(I, ResultReg);
1220 return true;
1221 }
1222
1223 X86::CondCode CC;
1224 bool SwapArgs;
1225 std::tie(CC, SwapArgs) = getX86ConditionCode(Predicate);
1226 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
1227 unsigned Opc = X86::getSETFromCond(CC);
1228
1229 if (SwapArgs)
1230 std::swap(LHS, RHS);
1231
1232 // Emit a compare of LHS/RHS.
1233 if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
1234 return false;
1235
1236 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
1237 updateValueMap(I, ResultReg);
1238 return true;
1239}
1240
1241bool X86FastISel::X86SelectZExt(const Instruction *I) {
1242 EVT DstVT = TLI.getValueType(I->getType());
1243 if (!TLI.isTypeLegal(DstVT))
1244 return false;
1245
1246 unsigned ResultReg = getRegForValue(I->getOperand(0));
1247 if (ResultReg == 0)
1248 return false;
1249
1250 // Handle zero-extension from i1 to i8, which is common.
1251 MVT SrcVT = TLI.getSimpleValueType(I->getOperand(0)->getType());
1252 if (SrcVT.SimpleTy == MVT::i1) {
1253 // Set the high bits to zero.
1254 ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false);
1255 SrcVT = MVT::i8;
1256
1257 if (ResultReg == 0)
1258 return false;
1259 }
1260
1261 if (DstVT == MVT::i64) {
1262 // Handle extension to 64-bits via sub-register shenanigans.
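    // Writing a 32-bit register implicitly zeroes bits 63:32, so a 32-bit
    // MOVZX/MOV followed by SUBREG_TO_REG yields the zero-extended i64.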
1263 unsigned MovInst;
1264
1265 switch (SrcVT.SimpleTy) {
1266 case MVT::i8: MovInst = X86::MOVZX32rr8; break;
1267 case MVT::i16: MovInst = X86::MOVZX32rr16; break;
1268 case MVT::i32: MovInst = X86::MOV32rr; break;
1269 default: llvm_unreachable("Unexpected zext to i64 source type");
1270 }
1271
1272 unsigned Result32 = createResultReg(&X86::GR32RegClass);
1273 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovInst), Result32)
1274 .addReg(ResultReg);
1275
1276 ResultReg = createResultReg(&X86::GR64RegClass);
1277 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::SUBREG_TO_REG),
1278 ResultReg)
1279 .addImm(0).addReg(Result32).addImm(X86::sub_32bit);
1280 } else if (DstVT != MVT::i8) {
1281 ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
1282 ResultReg, /*Kill=*/true);
1283 if (ResultReg == 0)
1284 return false;
1285 }
1286
1287 updateValueMap(I, ResultReg);
1288 return true;
1289}
1290
1291bool X86FastISel::X86SelectBranch(const Instruction *I) {
1292 // Unconditional branches are selected by tablegen-generated code.
1293 // Handle a conditional branch.
1294 const BranchInst *BI = cast<BranchInst>(I);
1295 MachineBasicBlock *TrueMBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
1296 MachineBasicBlock *FalseMBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
1297
1298 // Fold the common case of a conditional branch with a comparison
1299 // in the same block (values defined on other blocks may not have
1300 // initialized registers).
1301 X86::CondCode CC;
1302 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
1303 if (CI->hasOneUse() && CI->getParent() == I->getParent()) {
1304 EVT VT = TLI.getValueType(CI->getOperand(0)->getType());
1305
1306 // Try to optimize or fold the cmp.
1307 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
1308 switch (Predicate) {
1309 default: break;
1310 case CmpInst::FCMP_FALSE: fastEmitBranch(FalseMBB, DbgLoc); return true;
1311 case CmpInst::FCMP_TRUE: fastEmitBranch(TrueMBB, DbgLoc); return true;
1312 }
1313
1314 const Value *CmpLHS = CI->getOperand(0);
1315 const Value *CmpRHS = CI->getOperand(1);
1316
1317 // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x,
1318 // 0.0.
1319 // We don't have to materialize a zero constant for this case and can just
1320 // use %x again on the RHS.
1321 if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
1322 const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
1323 if (CmpRHSC && CmpRHSC->isNullValue())
1324 CmpRHS = CmpLHS;
1325 }
1326
1327 // Try to take advantage of fallthrough opportunities.
1328 if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
1329 std::swap(TrueMBB, FalseMBB);
1330 Predicate = CmpInst::getInversePredicate(Predicate);
1331 }
1332
1333 // FCMP_OEQ and FCMP_UNE cannot be expressed with a single flag/condition
1334 // code check. Instead two branch instructions are required to check all
1335 // the flags. First we change the predicate to a supported condition code,
      // which will be the first branch. Later on we will emit the second
1337 // branch.
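      // For FCMP_UNE this ends up as "JNE TrueMBB; JP TrueMBB" (not-equal or
      // unordered); FCMP_OEQ swaps the successors first and does the same.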
1338 bool NeedExtraBranch = false;
1339 switch (Predicate) {
1340 default: break;
1341 case CmpInst::FCMP_OEQ:
1342 std::swap(TrueMBB, FalseMBB); // fall-through
1343 case CmpInst::FCMP_UNE:
1344 NeedExtraBranch = true;
1345 Predicate = CmpInst::FCMP_ONE;
1346 break;
1347 }
1348
1349 bool SwapArgs;
1350 unsigned BranchOpc;
1351 std::tie(CC, SwapArgs) = getX86ConditionCode(Predicate);
1352 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
1353
1354 BranchOpc = X86::GetCondBranchFromCond(CC);
1355 if (SwapArgs)
1356 std::swap(CmpLHS, CmpRHS);
1357
1358 // Emit a compare of the LHS and RHS, setting the flags.
1359 if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT, CI->getDebugLoc()))
1360 return false;
1361
1362 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BranchOpc))
1363 .addMBB(TrueMBB);
1364
1365 // X86 requires a second branch to handle UNE (and OEQ, which is mapped
1366 // to UNE above).
1367 if (NeedExtraBranch) {
1368 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JP_1))
1369 .addMBB(TrueMBB);
1370 }
1371
1372 // Obtain the branch weight and add the TrueBB to the successor list.
1373 uint32_t BranchWeight = 0;
1374 if (FuncInfo.BPI)
1375 BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
1376 TrueMBB->getBasicBlock());
1377 FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight);
1378
1379 // Emits an unconditional branch to the FalseBB, obtains the branch
1380 // weight, and adds it to the successor list.
1381 fastEmitBranch(FalseMBB, DbgLoc);
1382
1383 return true;
1384 }
1385 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
1386 // Handle things like "%cond = trunc i32 %X to i1 / br i1 %cond", which
1387 // typically happen for _Bool and C++ bools.
1388 MVT SourceVT;
1389 if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
1390 isTypeLegal(TI->getOperand(0)->getType(), SourceVT)) {
1391 unsigned TestOpc = 0;
1392 switch (SourceVT.SimpleTy) {
1393 default: break;
1394 case MVT::i8: TestOpc = X86::TEST8ri; break;
1395 case MVT::i16: TestOpc = X86::TEST16ri; break;
1396 case MVT::i32: TestOpc = X86::TEST32ri; break;
1397 case MVT::i64: TestOpc = X86::TEST64ri32; break;
1398 }
1399 if (TestOpc) {
1400 unsigned OpReg = getRegForValue(TI->getOperand(0));
1401 if (OpReg == 0) return false;
1402 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TestOpc))
1403 .addReg(OpReg).addImm(1);
1404
1405 unsigned JmpOpc = X86::JNE_1;
1406 if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
1407 std::swap(TrueMBB, FalseMBB);
1408 JmpOpc = X86::JE_1;
1409 }
1410
1411 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(JmpOpc))
1412 .addMBB(TrueMBB);
1413 fastEmitBranch(FalseMBB, DbgLoc);
1414 uint32_t BranchWeight = 0;
1415 if (FuncInfo.BPI)
1416 BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
1417 TrueMBB->getBasicBlock());
1418 FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight);
1419 return true;
1420 }
1421 }
1422 } else if (foldX86XALUIntrinsic(CC, BI, BI->getCondition())) {
    // Fake-request the condition; otherwise the intrinsic might be completely
    // optimized away.
1425 unsigned TmpReg = getRegForValue(BI->getCondition());
1426 if (TmpReg == 0)
1427 return false;
1428
1429 unsigned BranchOpc = X86::GetCondBranchFromCond(CC);
1430
1431 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BranchOpc))
1432 .addMBB(TrueMBB);
1433 fastEmitBranch(FalseMBB, DbgLoc);
1434 uint32_t BranchWeight = 0;
1435 if (FuncInfo.BPI)
1436 BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
1437 TrueMBB->getBasicBlock());
1438 FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight);
1439 return true;
1440 }
1441
1442 // Otherwise do a clumsy setcc and re-test it.
1443 // Note that i1 essentially gets ANY_EXTEND'ed to i8 where it isn't used
1444 // in an explicit cast, so make sure to handle that correctly.
1445 unsigned OpReg = getRegForValue(BI->getCondition());
1446 if (OpReg == 0) return false;
1447
1448 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
1449 .addReg(OpReg).addImm(1);
1450 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JNE_1))
1451 .addMBB(TrueMBB);
1452 fastEmitBranch(FalseMBB, DbgLoc);
1453 uint32_t BranchWeight = 0;
1454 if (FuncInfo.BPI)
1455 BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
1456 TrueMBB->getBasicBlock());
1457 FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight);
1458 return true;
1459}
1460
1461bool X86FastISel::X86SelectShift(const Instruction *I) {
1462 unsigned CReg = 0, OpReg = 0;
1463 const TargetRegisterClass *RC = nullptr;
1464 if (I->getType()->isIntegerTy(8)) {
1465 CReg = X86::CL;
1466 RC = &X86::GR8RegClass;
1467 switch (I->getOpcode()) {
1468 case Instruction::LShr: OpReg = X86::SHR8rCL; break;
1469 case Instruction::AShr: OpReg = X86::SAR8rCL; break;
1470 case Instruction::Shl: OpReg = X86::SHL8rCL; break;
1471 default: return false;
1472 }
1473 } else if (I->getType()->isIntegerTy(16)) {
1474 CReg = X86::CX;
1475 RC = &X86::GR16RegClass;
1476 switch (I->getOpcode()) {
1477 case Instruction::LShr: OpReg = X86::SHR16rCL; break;
1478 case Instruction::AShr: OpReg = X86::SAR16rCL; break;
1479 case Instruction::Shl: OpReg = X86::SHL16rCL; break;
1480 default: return false;
1481 }
1482 } else if (I->getType()->isIntegerTy(32)) {
1483 CReg = X86::ECX;
1484 RC = &X86::GR32RegClass;
1485 switch (I->getOpcode()) {
1486 case Instruction::LShr: OpReg = X86::SHR32rCL; break;
1487 case Instruction::AShr: OpReg = X86::SAR32rCL; break;
1488 case Instruction::Shl: OpReg = X86::SHL32rCL; break;
1489 default: return false;
1490 }
1491 } else if (I->getType()->isIntegerTy(64)) {
1492 CReg = X86::RCX;
1493 RC = &X86::GR64RegClass;
1494 switch (I->getOpcode()) {
1495 case Instruction::LShr: OpReg = X86::SHR64rCL; break;
1496 case Instruction::AShr: OpReg = X86::SAR64rCL; break;
1497 case Instruction::Shl: OpReg = X86::SHL64rCL; break;
1498 default: return false;
1499 }
1500 } else {
1501 return false;
1502 }
1503
1504 MVT VT;
1505 if (!isTypeLegal(I->getType(), VT))
1506 return false;
1507
1508 unsigned Op0Reg = getRegForValue(I->getOperand(0));
1509 if (Op0Reg == 0) return false;
1510
1511 unsigned Op1Reg = getRegForValue(I->getOperand(1));
1512 if (Op1Reg == 0) return false;
1513 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
1514 CReg).addReg(Op1Reg);
1515
1516 // The shift instruction uses X86::CL. If we defined a super-register
1517 // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
1518 if (CReg != X86::CL)
1519 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1520 TII.get(TargetOpcode::KILL), X86::CL)
1521 .addReg(CReg, RegState::Kill);
1522
1523 unsigned ResultReg = createResultReg(RC);
1524 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(OpReg), ResultReg)
1525 .addReg(Op0Reg);
1526 updateValueMap(I, ResultReg);
1527 return true;
1528}
1529
1530bool X86FastISel::X86SelectDivRem(const Instruction *I) {
1531 const static unsigned NumTypes = 4; // i8, i16, i32, i64
1532 const static unsigned NumOps = 4; // SDiv, SRem, UDiv, URem
1533 const static bool S = true; // IsSigned
1534 const static bool U = false; // !IsSigned
1535 const static unsigned Copy = TargetOpcode::COPY;
1536 // For the X86 DIV/IDIV instruction, in most cases the dividend
1537 // (numerator) must be in a specific register pair highreg:lowreg,
1538 // producing the quotient in lowreg and the remainder in highreg.
1539 // For most data types, to set up the instruction, the dividend is
1540 // copied into lowreg, and lowreg is sign-extended or zero-extended
1541 // into highreg. The exception is i8, where the dividend is defined
1542 // as a single register rather than a register pair, and we
1543 // therefore directly sign-extend or zero-extend the dividend into
1544 // lowreg, instead of copying, and ignore the highreg.
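  // For example, a signed 32-bit division copies the dividend into EAX,
  // sign-extends it into EDX:EAX with CDQ, and issues IDIV32r; the quotient
  // lands in EAX and the remainder in EDX.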
1545 const static struct DivRemEntry {
1546 // The following portion depends only on the data type.
1547 const TargetRegisterClass *RC;
1548 unsigned LowInReg; // low part of the register pair
1549 unsigned HighInReg; // high part of the register pair
1550 // The following portion depends on both the data type and the operation.
1551 struct DivRemResult {
1552 unsigned OpDivRem; // The specific DIV/IDIV opcode to use.
1553 unsigned OpSignExtend; // Opcode for sign-extending lowreg into
1554 // highreg, or copying a zero into highreg.
1555 unsigned OpCopy; // Opcode for copying dividend into lowreg, or
1556 // zero/sign-extending into lowreg for i8.
1557 unsigned DivRemResultReg; // Register containing the desired result.
1558 bool IsOpSigned; // Whether to use signed or unsigned form.
1559 } ResultTable[NumOps];
1560 } OpTable[NumTypes] = {
1561 { &X86::GR8RegClass, X86::AX, 0, {
1562 { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S }, // SDiv
1563 { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S }, // SRem
1564 { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U }, // UDiv
1565 { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U }, // URem
1566 }
1567 }, // i8
1568 { &X86::GR16RegClass, X86::AX, X86::DX, {
1569 { X86::IDIV16r, X86::CWD, Copy, X86::AX, S }, // SDiv
1570 { X86::IDIV16r, X86::CWD, Copy, X86::DX, S }, // SRem
1571 { X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U }, // UDiv
1572 { X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U }, // URem
1573 }
1574 }, // i16
1575 { &X86::GR32RegClass, X86::EAX, X86::EDX, {
1576 { X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S }, // SDiv
1577 { X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S }, // SRem
1578 { X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U }, // UDiv
1579 { X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U }, // URem
1580 }
1581 }, // i32
1582 { &X86::GR64RegClass, X86::RAX, X86::RDX, {
1583 { X86::IDIV64r, X86::CQO, Copy, X86::RAX, S }, // SDiv
1584 { X86::IDIV64r, X86::CQO, Copy, X86::RDX, S }, // SRem
1585 { X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U }, // UDiv
1586 { X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U }, // URem
1587 }
1588 }, // i64
1589 };
1590
1591 MVT VT;
1592 if (!isTypeLegal(I->getType(), VT))
1593 return false;
1594
1595 unsigned TypeIndex, OpIndex;
1596 switch (VT.SimpleTy) {
1597 default: return false;
1598 case MVT::i8: TypeIndex = 0; break;
1599 case MVT::i16: TypeIndex = 1; break;
1600 case MVT::i32: TypeIndex = 2; break;
1601 case MVT::i64: TypeIndex = 3;
1602 if (!Subtarget->is64Bit())
1603 return false;
1604 break;
1605 }
1606
1607 switch (I->getOpcode()) {
1608 default: llvm_unreachable("Unexpected div/rem opcode");
1609 case Instruction::SDiv: OpIndex = 0; break;
1610 case Instruction::SRem: OpIndex = 1; break;
1611 case Instruction::UDiv: OpIndex = 2; break;
1612 case Instruction::URem: OpIndex = 3; break;
1613 }
1614
1615 const DivRemEntry &TypeEntry = OpTable[TypeIndex];
1616 const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];
1617 unsigned Op0Reg = getRegForValue(I->getOperand(0));
1618 if (Op0Reg == 0)
1619 return false;
1620 unsigned Op1Reg = getRegForValue(I->getOperand(1));
1621 if (Op1Reg == 0)
1622 return false;
1623
1624 // Move op0 into low-order input register.
1625 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1626 TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg);
1627 // Zero-extend or sign-extend into high-order input register.
1628 if (OpEntry.OpSignExtend) {
1629 if (OpEntry.IsOpSigned)
1630 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1631 TII.get(OpEntry.OpSignExtend));
1632 else {
1633 unsigned Zero32 = createResultReg(&X86::GR32RegClass);
1634 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1635 TII.get(X86::MOV32r0), Zero32);
1636
1637 // Copy the zero into the appropriate sub/super/identical physical
1638 // register. Unfortunately the operations needed are not uniform enough
1639 // to fit neatly into the table above.
1640 if (VT.SimpleTy == MVT::i16) {
1641 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1642 TII.get(Copy), TypeEntry.HighInReg)
1643 .addReg(Zero32, 0, X86::sub_16bit);
1644 } else if (VT.SimpleTy == MVT::i32) {
1645 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1646 TII.get(Copy), TypeEntry.HighInReg)
1647 .addReg(Zero32);
1648 } else if (VT.SimpleTy == MVT::i64) {
1649 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1650 TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
1651 .addImm(0).addReg(Zero32).addImm(X86::sub_32bit);
1652 }
1653 }
1654 }
1655 // Generate the DIV/IDIV instruction.
1656 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1657 TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);
1658 // For i8 remainder, we can't reference AH directly, as we'll end
1659 // up with bogus copies like %R9B = COPY %AH. Reference AX
1660 // instead to prevent AH references in a REX instruction.
1661 //
1662 // The current assumption of the fast register allocator is that isel
1663 // won't generate explicit references to the GPR8_NOREX registers. If
1664 // the allocator and/or the backend get enhanced to be more robust in
1665 // that regard, this can be, and should be, removed.
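  // Concretely, for an i8 'srem' on x86-64 this emits:
  //   %tmp = COPY %AX
  //   %tmp2 = SHR16ri %tmp, 8          ; moves AH's contents into the low byte
  //   %result = COPY %tmp2:sub_8bit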
1666 unsigned ResultReg = 0;
1667 if ((I->getOpcode() == Instruction::SRem ||
1668 I->getOpcode() == Instruction::URem) &&
1669 OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {
1670 unsigned SourceSuperReg = createResultReg(&X86::GR16RegClass);
1671 unsigned ResultSuperReg = createResultReg(&X86::GR16RegClass);
1672 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1673 TII.get(Copy), SourceSuperReg).addReg(X86::AX);
1674
1675 // Shift AX right by 8 bits instead of using AH.
1676 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SHR16ri),
1677 ResultSuperReg).addReg(SourceSuperReg).addImm(8);
1678
1679 // Now reference the 8-bit subreg of the result.
1680 ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
1681 /*Kill=*/true, X86::sub_8bit);
1682 }
1683 // Copy the result out of the physreg if we haven't already.
1684 if (!ResultReg) {
1685 ResultReg = createResultReg(TypeEntry.RC);
1686 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Copy), ResultReg)
1687 .addReg(OpEntry.DivRemResultReg);
1688 }
1689 updateValueMap(I, ResultReg);
1690
1691 return true;
1692}
1693
1694/// \brief Emit a conditional move instruction (if they are supported) to lower
1695/// the select.
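///
/// For example, 'select i1 %c, i32 %t, i32 %f' is lowered to something like:
///   TEST8ri %c, 1
///   %result = CMOVNE32rr %f, %t      ; %t if %c is set, %f otherwise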
1696bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
1697 // Check if the subtarget supports these instructions.
1698 if (!Subtarget->hasCMov())
1699 return false;
1700
1701 // FIXME: Add support for i8.
1702 if (RetVT < MVT::i16 || RetVT > MVT::i64)
1703 return false;
1704
1705 const Value *Cond = I->getOperand(0);
1706 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
1707 bool NeedTest = true;
1708 X86::CondCode CC = X86::COND_NE;
1709
1710 // Optimize conditions coming from a compare if both instructions are in the
1711 // same basic block (values defined in other basic blocks may not have
1712 // initialized registers).
1713 const auto *CI = dyn_cast<CmpInst>(Cond);
1714 if (CI && (CI->getParent() == I->getParent())) {
1715 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
1716
1717 // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
1718 static unsigned SETFOpcTable[2][3] = {
1719 { X86::SETNPr, X86::SETEr , X86::TEST8rr },
1720 { X86::SETPr, X86::SETNEr, X86::OR8rr }
1721 };
1722 unsigned *SETFOpc = nullptr;
1723 switch (Predicate) {
1724 default: break;
1725 case CmpInst::FCMP_OEQ:
1726 SETFOpc = &SETFOpcTable[0][0];
1727 Predicate = CmpInst::ICMP_NE;
1728 break;
1729 case CmpInst::FCMP_UNE:
1730 SETFOpc = &SETFOpcTable[1][0];
1731 Predicate = CmpInst::ICMP_NE;
1732 break;
1733 }
1734
1735 bool NeedSwap;
1736 std::tie(CC, NeedSwap) = getX86ConditionCode(Predicate);
1737 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
1738
1739 const Value *CmpLHS = CI->getOperand(0);
1740 const Value *CmpRHS = CI->getOperand(1);
1741 if (NeedSwap)
1742 std::swap(CmpLHS, CmpRHS);
1743
1744 EVT CmpVT = TLI.getValueType(CmpLHS->getType());
1745 // Emit a compare of the LHS and RHS, setting the flags.
1746 if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
1747 return false;
1748
1749 if (SETFOpc) {
1750 unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
1751 unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);
1752 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[0]),
1753 FlagReg1);
1754 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[1]),
1755 FlagReg2);
1756 auto const &II = TII.get(SETFOpc[2]);
1757 if (II.getNumDefs()) {
1758 unsigned TmpReg = createResultReg(&X86::GR8RegClass);
1759 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, TmpReg)
1760 .addReg(FlagReg2).addReg(FlagReg1);
1761 } else {
1762 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1763 .addReg(FlagReg2).addReg(FlagReg1);
1764 }
1765 }
1766 NeedTest = false;
1767 } else if (foldX86XALUIntrinsic(CC, I, Cond)) {
1768 // Fake request the condition, otherwise the intrinsic might be completely
1769 // optimized away.
1770 unsigned TmpReg = getRegForValue(Cond);
1771 if (TmpReg == 0)
1772 return false;
1773
1774 NeedTest = false;
1775 }
1776
1777 if (NeedTest) {
1778 // Selects operate on i1; however, CondReg is 8 bits wide and may contain
1779 // garbage. Only the least significant bit is supposed to be accurate, so
1780 // if we read more than the lsb we may see non-zero values even though the
1781 // lsb is zero. Therefore, truncate CondReg to i1 for the select. This is
1782 // achieved by performing a TEST against 1.
1783 unsigned CondReg = getRegForValue(Cond);
1784 if (CondReg == 0)
1785 return false;
1786 bool CondIsKill = hasTrivialKill(Cond);
1787
1788 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
1789 .addReg(CondReg, getKillRegState(CondIsKill)).addImm(1);
1790 }
1791
1792 const Value *LHS = I->getOperand(1);
1793 const Value *RHS = I->getOperand(2);
1794
1795 unsigned RHSReg = getRegForValue(RHS);
1796 bool RHSIsKill = hasTrivialKill(RHS);
1797
1798 unsigned LHSReg = getRegForValue(LHS);
1799 bool LHSIsKill = hasTrivialKill(LHS);
1800
1801 if (!LHSReg || !RHSReg)
1802 return false;
1803
1804 unsigned Opc = X86::getCMovFromCond(CC, RC->getSize());
1805 unsigned ResultReg = fastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
1806 LHSReg, LHSIsKill);
1807 updateValueMap(I, ResultReg);
1808 return true;
1809}
1810
1811/// \brief Emit SSE instructions to lower the select.
1812///
1813/// Try to use SSE1/SSE2 instructions to simulate a select without branches.
1814/// This lowers fp selects into a CMP/AND/ANDN/OR sequence when the necessary
1815/// SSE instructions are available.
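///
/// For example, 'select (fcmp olt %a, %b), float %t, float %f' becomes
/// roughly:
///   %mask = CMPSSrr %a, %b, <cc>     ; all-ones if the condition holds
///   %and  = FsANDPSrr %mask, %t
///   %andn = FsANDNPSrr %mask, %f     ; ~%mask & %f
///   %res  = FsORPSrr %andn, %and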
1816bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
1817 // Optimize conditions coming from a compare if both instructions are in the
1818 // same basic block (values defined in other basic blocks may not have
1819 // initialized registers).
1820 const auto *CI = dyn_cast<FCmpInst>(I->getOperand(0));
1821 if (!CI || (CI->getParent() != I->getParent()))
1822 return false;
1823
1824 if (I->getType() != CI->getOperand(0)->getType() ||
1825 !((Subtarget->hasSSE1() && RetVT == MVT::f32) ||
1826 (Subtarget->hasSSE2() && RetVT == MVT::f64)))
1827 return false;
1828
1829 const Value *CmpLHS = CI->getOperand(0);
1830 const Value *CmpRHS = CI->getOperand(1);
1831 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
1832
1833 // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
1834 // We don't have to materialize a zero constant for this case and can just use
1835 // %x again on the RHS.
1836 if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
1837 const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
1838 if (CmpRHSC && CmpRHSC->isNullValue())
1839 CmpRHS = CmpLHS;
1840 }
1841
1842 unsigned CC;
1843 bool NeedSwap;
1844 std::tie(CC, NeedSwap) = getX86SSEConditionCode(Predicate);
1845 if (CC > 7)
1846 return false;
1847
1848 if (NeedSwap)
1849 std::swap(CmpLHS, CmpRHS);
1850
1851 static unsigned OpcTable[2][2][4] = {
1852 { { X86::CMPSSrr, X86::FsANDPSrr, X86::FsANDNPSrr, X86::FsORPSrr },
1853 { X86::VCMPSSrr, X86::VFsANDPSrr, X86::VFsANDNPSrr, X86::VFsORPSrr } },
1854 { { X86::CMPSDrr, X86::FsANDPDrr, X86::FsANDNPDrr, X86::FsORPDrr },
1855 { X86::VCMPSDrr, X86::VFsANDPDrr, X86::VFsANDNPDrr, X86::VFsORPDrr } }
1856 };
1857
1858 bool HasAVX = Subtarget->hasAVX();
1859 unsigned *Opc = nullptr;
1860 switch (RetVT.SimpleTy) {
1861 default: return false;
1862 case MVT::f32: Opc = &OpcTable[0][HasAVX][0]; break;
1863 case MVT::f64: Opc = &OpcTable[1][HasAVX][0]; break;
1864 }
1865
1866 const Value *LHS = I->getOperand(1);
1867 const Value *RHS = I->getOperand(2);
1868
1869 unsigned LHSReg = getRegForValue(LHS);
1870 bool LHSIsKill = hasTrivialKill(LHS);
1871
1872 unsigned RHSReg = getRegForValue(RHS);
1873 bool RHSIsKill = hasTrivialKill(RHS);
1874
1875 unsigned CmpLHSReg = getRegForValue(CmpLHS);
1876 bool CmpLHSIsKill = hasTrivialKill(CmpLHS);
1877
1878 unsigned CmpRHSReg = getRegForValue(CmpRHS);
1879 bool CmpRHSIsKill = hasTrivialKill(CmpRHS);
1880
1881 if (!LHSReg || !RHSReg || !CmpLHSReg || !CmpRHSReg)
1882 return false;
1883
1884 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
1885 unsigned CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
1886 CmpRHSReg, CmpRHSIsKill, CC);
1887 unsigned AndReg = fastEmitInst_rr(Opc[1], RC, CmpReg, /*IsKill=*/false,
1888 LHSReg, LHSIsKill);
1889 unsigned AndNReg = fastEmitInst_rr(Opc[2], RC, CmpReg, /*IsKill=*/true,
1890 RHSReg, RHSIsKill);
1891 unsigned ResultReg = fastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true,
1892 AndReg, /*IsKill=*/true);
1893 updateValueMap(I, ResultReg);
1894 return true;
1895}
1896
1897bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
1898 // These are pseudo CMOV instructions and will later be expanded into
1899 // control flow.
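// For example, an i8 select is emitted as a TEST8ri of the condition against 1
// followed by a CMOV_GR8 of the two operands with the computed condition code.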
1900 unsigned Opc;
1901 switch (RetVT.SimpleTy) {
1902 default: return false;
1903 case MVT::i8: Opc = X86::CMOV_GR8; break;
1904 case MVT::i16: Opc = X86::CMOV_GR16; break;
1905 case MVT::i32: Opc = X86::CMOV_GR32; break;
1906 case MVT::f32: Opc = X86::CMOV_FR32; break;
1907 case MVT::f64: Opc = X86::CMOV_FR64; break;
1908 }
1909
1910 const Value *Cond = I->getOperand(0);
1911 X86::CondCode CC = X86::COND_NE;
1912
1913 // Optimize conditions coming from a compare if both instructions are in the
1914 // same basic block (values defined in other basic blocks may not have
1915 // initialized registers).
1916 const auto *CI = dyn_cast<CmpInst>(Cond);
1917 if (CI && (CI->getParent() == I->getParent())) {
1918 bool NeedSwap;
1919 std::tie(CC, NeedSwap) = getX86ConditionCode(CI->getPredicate());
1920 if (CC > X86::LAST_VALID_COND)
1921 return false;
1922
1923 const Value *CmpLHS = CI->getOperand(0);
1924 const Value *CmpRHS = CI->getOperand(1);
1925
1926 if (NeedSwap)
1927 std::swap(CmpLHS, CmpRHS);
1928
1929 EVT CmpVT = TLI.getValueType(CmpLHS->getType());
1930 if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
1931 return false;
1932 } else {
1933 unsigned CondReg = getRegForValue(Cond);
1934 if (CondReg == 0)
1935 return false;
1936 bool CondIsKill = hasTrivialKill(Cond);
1937 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
1938 .addReg(CondReg, getKillRegState(CondIsKill)).addImm(1);
1939 }
1940
1941 const Value *LHS = I->getOperand(1);
1942 const Value *RHS = I->getOperand(2);
1943
1944 unsigned LHSReg = getRegForValue(LHS);
1945 bool LHSIsKill = hasTrivialKill(LHS);
1946
1947 unsigned RHSReg = getRegForValue(RHS);
1948 bool RHSIsKill = hasTrivialKill(RHS);
1949
1950 if (!LHSReg || !RHSReg)
1951 return false;
1952
1953 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
1954
1955 unsigned ResultReg =
1956 fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
1957 updateValueMap(I, ResultReg);
1958 return true;
1959}
1960
1961bool X86FastISel::X86SelectSelect(const Instruction *I) {
1962 MVT RetVT;
1963 if (!isTypeLegal(I->getType(), RetVT))
1964 return false;
1965
1966 // Check if we can fold the select.
1967 if (const auto *CI = dyn_cast<CmpInst>(I->getOperand(0))) {
1968 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
1969 const Value *Opnd = nullptr;
1970 switch (Predicate) {
1971 default: break;
1972 case CmpInst::FCMP_FALSE: Opnd = I->getOperand(2); break;
1973 case CmpInst::FCMP_TRUE: Opnd = I->getOperand(1); break;
1974 }
1975 // No need for a select anymore - this is an unconditional move.
1976 if (Opnd) {
1977 unsigned OpReg = getRegForValue(Opnd);
1978 if (OpReg == 0)
1979 return false;
1980 bool OpIsKill = hasTrivialKill(Opnd);
1981 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
1982 unsigned ResultReg = createResultReg(RC);
1983 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1984 TII.get(TargetOpcode::COPY), ResultReg)
1985 .addReg(OpReg, getKillRegState(OpIsKill));
1986 updateValueMap(I, ResultReg);
1987 return true;
1988 }
1989 }
1990
1991 // First try to use real conditional move instructions.
1992 if (X86FastEmitCMoveSelect(RetVT, I))
1993 return true;
1994
1995 // Try to use a sequence of SSE instructions to simulate a conditional move.
1996 if (X86FastEmitSSESelect(RetVT, I))
1997 return true;
1998
1999 // Fall back to pseudo conditional move instructions, which will later be
2000 // converted to control flow.
2001 if (X86FastEmitPseudoSelect(RetVT, I))
2002 return true;
2003
2004 return false;
2005}
2006
2007// Helper method used by X86SelectFPExt and X86SelectFPTrunc.
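// With AVX the conversion instructions take an extra source operand, so the
// source register is added twice (e.g. VCVTSS2SDrr %src, %src); the SSE forms
// take a single source (e.g. CVTSS2SDrr %src).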
2008bool X86FastISel::X86SelectFPExtOrFPTrunc(const Instruction *I,
2009 unsigned TargetOpc,
2010 const TargetRegisterClass *RC) {
2011 assert((I->getOpcode() == Instruction::FPExt ||
2012 I->getOpcode() == Instruction::FPTrunc) &&
2013 "Instruction must be an FPExt or FPTrunc!");
2014
2015 unsigned OpReg = getRegForValue(I->getOperand(0));
2016 if (OpReg == 0)
2017 return false;
2018
2019 unsigned ResultReg = createResultReg(RC);
2020 MachineInstrBuilder MIB;
2021 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpc),
2022 ResultReg);
2023 if (Subtarget->hasAVX())
2024 MIB.addReg(OpReg);
2025 MIB.addReg(OpReg);
2026 updateValueMap(I, ResultReg);
2027 return true;
2028}
2029
2030bool X86FastISel::X86SelectFPExt(const Instruction *I) {
2031 if (X86ScalarSSEf64 && I->getType()->isDoubleTy() &&
2032 I->getOperand(0)->getType()->isFloatTy()) {
2033 // fpext from float to double.
2034 unsigned Opc = Subtarget->hasAVX() ? X86::VCVTSS2SDrr : X86::CVTSS2SDrr;
2035 return X86SelectFPExtOrFPTrunc(I, Opc, &X86::FR64RegClass);
2036 }
2037
2038 return false;
2039}
2040
2041bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
2042 if (X86ScalarSSEf64 && I->getType()->isFloatTy() &&
2043 I->getOperand(0)->getType()->isDoubleTy()) {
2044 // fptrunc from double to float.
2045 unsigned Opc = Subtarget->hasAVX() ? X86::VCVTSD2SSrr : X86::CVTSD2SSrr;
2046 return X86SelectFPExtOrFPTrunc(I, Opc, &X86::FR32RegClass);
2047 }
2048
2049 return false;
2050}
2051
2052bool X86FastISel::X86SelectTrunc(const Instruction *I) {
2053 EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
2054 EVT DstVT = TLI.getValueType(I->getType());
2055
2056 // This code only handles truncation to byte.
2057 if (DstVT != MVT::i8 && DstVT != MVT::i1)
2058 return false;
2059 if (!TLI.isTypeLegal(SrcVT))
2060 return false;
2061
2062 unsigned InputReg = getRegForValue(I->getOperand(0));
2063 if (!InputReg)
2064 // Unhandled operand. Halt "fast" selection and bail.
2065 return false;
2066
2067 if (SrcVT == MVT::i8) {
2068 // Truncate from i8 to i1; no code needed.
2069 updateValueMap(I, InputReg);
2070 return true;
2071 }
2072
2073 if (!Subtarget->is64Bit()) {
2074 // If we're on x86-32, we can't extract an i8 from a general register.
2075 // First issue a copy to GR16_ABCD or GR32_ABCD.
2076 const TargetRegisterClass *CopyRC =
2077 (SrcVT == MVT::i16) ? &X86::GR16_ABCDRegClass : &X86::GR32_ABCDRegClass;
2078 unsigned CopyReg = createResultReg(CopyRC);
2079 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2080 TII.get(TargetOpcode::COPY), CopyReg).addReg(InputReg);
2081 InputReg = CopyReg;
2082 }
2083
2084 // Issue an extract_subreg.
2085 unsigned ResultReg = fastEmitInst_extractsubreg(MVT::i8,
2086 InputReg, /*Kill=*/true,
2087 X86::sub_8bit);
2088 if (!ResultReg)
2089 return false;
2090
2091 updateValueMap(I, ResultReg);
2092 return true;
2093}
2094
2095bool X86FastISel::IsMemcpySmall(uint64_t Len) {
2096 return Len <= (Subtarget->is64Bit() ? 32 : 16);
2097}
2098
2099bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
2100 X86AddressMode SrcAM, uint64_t Len) {
2101
2102 // Make sure we don't bloat code by inlining very large memcpy's.
2103 if (!IsMemcpySmall(Len))
2104 return false;
2105
2106 bool i64Legal = Subtarget->is64Bit();
2107
2108 // We don't care about alignment here since we just emit integer accesses.
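  // The widest legal access is used at each step; e.g. a 7-byte copy on
  // x86-64 is emitted as one 4-byte, one 2-byte and one 1-byte load/store
  // pair.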
2109 while (Len) {
2110 MVT VT;
2111 if (Len >= 8 && i64Legal)
2112 VT = MVT::i64;
2113 else if (Len >= 4)
2114 VT = MVT::i32;
2115 else if (Len >= 2)
2116 VT = MVT::i16;
2117 else
2118 VT = MVT::i8;
2119
2120 unsigned Reg;
2121 bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg);
2122 RV &= X86FastEmitStore(VT, Reg, /*Kill=*/true, DestAM);
2123 assert(RV && "Failed to emit load or store??");
2124
2125 unsigned Size = VT.getSizeInBits()/8;
2126 Len -= Size;
2127 DestAM.Disp += Size;
2128 SrcAM.Disp += Size;
2129 }
2130
2131 return true;
2132}
2133
2134bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
2135 // FIXME: Handle more intrinsics.
2136 switch (II->getIntrinsicID()) {
2137 default: return false;
2138 case Intrinsic::frameaddress: {
2139 Type *RetTy = II->getCalledFunction()->getReturnType();
2140
2141 MVT VT;
2142 if (!isTypeLegal(RetTy, VT))
2143 return false;
2144
2145 unsigned Opc;
2146 const TargetRegisterClass *RC = nullptr;
2147
2148 switch (VT.SimpleTy) {
2149 default: llvm_unreachable("Invalid result type for frameaddress.");
2150 case MVT::i32: Opc = X86::MOV32rm; RC = &X86::GR32RegClass; break;
2151 case MVT::i64: Opc = X86::MOV64rm; RC = &X86::GR64RegClass; break;
2152 }
2153
2154 // This needs to be set before we call getPtrSizedFrameRegister, otherwise
2155 // we get the wrong frame register.
2156 MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
2157 MFI->setFrameAddressIsTaken(true);
2158
2159 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2160 unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(*(FuncInfo.MF));
2161 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
2162 (FrameReg == X86::EBP && VT == MVT::i32)) &&
2163 "Invalid Frame Register!");
2164
2165 // Always make a copy of the frame register to a vreg first, so that we
2166 // never directly reference the frame register (the TwoAddressInstruction-
2167 // Pass doesn't like that).
2168 unsigned SrcReg = createResultReg(RC);
2169 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2170 TII.get(TargetOpcode::COPY), SrcReg).addReg(FrameReg);
2171
2172 // Now recursively load from the frame address.
2173 // movq (%rbp), %rax
2174 // movq (%rax), %rax
2175 // movq (%rax), %rax
2176 // ...
2177 unsigned DestReg;
2178 unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
2179 while (Depth--) {
2180 DestReg = createResultReg(RC);
2181 addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2182 TII.get(Opc), DestReg), SrcReg);
2183 SrcReg = DestReg;
2184 }
2185
2186 updateValueMap(II, SrcReg);
2187 return true;
2188 }
2189 case Intrinsic::memcpy: {
2190 const MemCpyInst *MCI = cast<MemCpyInst>(II);
2191 // Don't handle volatile or variable length memcpys.
2192 if (MCI->isVolatile())
2193 return false;
2194
2195 if (isa<ConstantInt>(MCI->getLength())) {
2196 // Small memcpy's are common enough that we want to do them
2197 // without a call if possible.
2198 uint64_t Len = cast<ConstantInt>(MCI->getLength())->getZExtValue();
2199 if (IsMemcpySmall(Len)) {
2200 X86AddressMode DestAM, SrcAM;
2201 if (!X86SelectAddress(MCI->getRawDest(), DestAM) ||
2202 !X86SelectAddress(MCI->getRawSource(), SrcAM))
2203 return false;
2204 TryEmitSmallMemcpy(DestAM, SrcAM, Len);
2205 return true;
2206 }
2207 }
2208
2209 unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
2210 if (!MCI->getLength()->getType()->isIntegerTy(SizeWidth))
2211 return false;
2212
2213 if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
2214 return false;
2215
2216 return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 2);
2217 }
2218 case Intrinsic::memset: {
2219 const MemSetInst *MSI = cast<MemSetInst>(II);
2220
2221 if (MSI->isVolatile())
2222 return false;
2223
2224 unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
2225 if (!MSI->getLength()->getType()->isIntegerTy(SizeWidth))
2226 return false;
2227
2228 if (MSI->getDestAddressSpace() > 255)
2229 return false;
2230
2231 return lowerCallTo(II, "memset", II->getNumArgOperands() - 2);
2232 }
2233 case Intrinsic::stackprotector: {
2234 // Emit code to store the stack guard onto the stack.
2235 EVT PtrTy = TLI.getPointerTy();
2236
2237 const Value *Op1 = II->getArgOperand(0); // The guard's value.
2238 const AllocaInst *Slot = cast<AllocaInst>(II->getArgOperand(1));
2239
2240 MFI.setStackProtectorIndex(FuncInfo.StaticAllocaMap[Slot]);
2241
2242 // Grab the frame index.
2243 X86AddressMode AM;
2244 if (!X86SelectAddress(Slot, AM)) return false;
2245 if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;
2246 return true;
2247 }
2248 case Intrinsic::dbg_declare: {
2249 const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
2250 X86AddressMode AM;
2251 assert(DI->getAddress() && "Null address should be checked earlier!");
2252 if (!X86SelectAddress(DI->getAddress(), AM))
2253 return false;
2254 const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
2255 // FIXME may need to add RegState::Debug to any registers produced,
2256 // although ESP/EBP should be the only ones at the moment.
2257 addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II), AM)
2258 .addImm(0)
2259 .addMetadata(DI->getVariable())
2260 .addMetadata(DI->getExpression());
2261 return true;
2262 }
2263 case Intrinsic::trap: {
2264 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TRAP));
2265 return true;
2266 }
2267 case Intrinsic::sqrt: {
2268 if (!Subtarget->hasSSE1())
2269 return false;
2270
2271 Type *RetTy = II->getCalledFunction()->getReturnType();
2272
2273 MVT VT;
2274 if (!isTypeLegal(RetTy, VT))
2275 return false;
2276
2277 // Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT
2278 // is not generated by FastISel yet.
2279 // FIXME: Update this code once tablegen can handle it.
2280 static const unsigned SqrtOpc[2][2] = {
2281 {X86::SQRTSSr, X86::VSQRTSSr},
2282 {X86::SQRTSDr, X86::VSQRTSDr}
2283 };
2284 bool HasAVX = Subtarget->hasAVX();
2285 unsigned Opc;
2286 const TargetRegisterClass *RC;
2287 switch (VT.SimpleTy) {
2288 default: return false;
2289 case MVT::f32: Opc = SqrtOpc[0][HasAVX]; RC = &X86::FR32RegClass; break;
2290 case MVT::f64: Opc = SqrtOpc[1][HasAVX]; RC = &X86::FR64RegClass; break;
2291 }
2292
2293 const Value *SrcVal = II->getArgOperand(0);
2294 unsigned SrcReg = getRegForValue(SrcVal);
2295
2296 if (SrcReg == 0)
2297 return false;
2298
2299 unsigned ImplicitDefReg = 0;
2300 if (HasAVX) {
2301 ImplicitDefReg = createResultReg(RC);
2302 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2303 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2304 }
2305
2306 unsigned ResultReg = createResultReg(RC);
2307 MachineInstrBuilder MIB;
2308 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
2309 ResultReg);
2310
2311 if (ImplicitDefReg)
2312 MIB.addReg(ImplicitDefReg);
2313
2314 MIB.addReg(SrcReg);
2315
2316 updateValueMap(II, ResultReg);
2317 return true;
2318 }
2319 case Intrinsic::sadd_with_overflow:
2320 case Intrinsic::uadd_with_overflow:
2321 case Intrinsic::ssub_with_overflow:
2322 case Intrinsic::usub_with_overflow:
2323 case Intrinsic::smul_with_overflow:
2324 case Intrinsic::umul_with_overflow: {
2325 // This implements the basic lowering of the xalu with overflow intrinsics
2326 // into add/sub/mul followed by either seto or setb.
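    // For example, 'llvm.sadd.with.overflow.i32(%a, %b)' becomes roughly:
    //   %val = ADD32rr %a, %b
    //   %ovf = SETOr              ; reads the OF flag set by the add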
2327 const Function *Callee = II->getCalledFunction();
2328 auto *Ty = cast<StructType>(Callee->getReturnType());
2329 Type *RetTy = Ty->getTypeAtIndex(0U);
2330 Type *CondTy = Ty->getTypeAtIndex(1);
2331
2332 MVT VT;
2333 if (!isTypeLegal(RetTy, VT))
2334 return false;
2335
2336 if (VT < MVT::i8 || VT > MVT::i64)
2337 return false;
2338
2339 const Value *LHS = II->getArgOperand(0);
2340 const Value *RHS = II->getArgOperand(1);
2341
2342 // Canonicalize immediate to the RHS.
2343 if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&
2344 isCommutativeIntrinsic(II))
2345 std::swap(LHS, RHS);
2346
2347 bool UseIncDec = false;
2348 if (isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isOne())
2349 UseIncDec = true;
2350
2351 unsigned BaseOpc, CondOpc;
2352 switch (II->getIntrinsicID()) {
2353 default: llvm_unreachable("Unexpected intrinsic!");
2354 case Intrinsic::sadd_with_overflow:
2355 BaseOpc = UseIncDec ? unsigned(X86ISD::INC) : unsigned(ISD::ADD);
2356 CondOpc = X86::SETOr;
2357 break;
2358 case Intrinsic::uadd_with_overflow:
2359 BaseOpc = ISD::ADD; CondOpc = X86::SETBr; break;
2360 case Intrinsic::ssub_with_overflow:
2361 BaseOpc = UseIncDec ? unsigned(X86ISD::DEC) : unsigned(ISD::SUB);
2362 CondOpc = X86::SETOr;
2363 break;
2364 case Intrinsic::usub_with_overflow:
2365 BaseOpc = ISD::SUB; CondOpc = X86::SETBr; break;
2366 case Intrinsic::smul_with_overflow:
2367 BaseOpc = X86ISD::SMUL; CondOpc = X86::SETOr; break;
2368 case Intrinsic::umul_with_overflow:
2369 BaseOpc = X86ISD::UMUL; CondOpc = X86::SETOr; break;
2370 }
2371
2372 unsigned LHSReg = getRegForValue(LHS);
2373 if (LHSReg == 0)
2374 return false;
2375 bool LHSIsKill = hasTrivialKill(LHS);
2376
2377 unsigned ResultReg = 0;
2378 // Check if we have an immediate version.
2379 if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
2380 static const unsigned Opc[2][4] = {
2381 { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r },
2382 { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r }
2383 };
2384
2385 if (BaseOpc == X86ISD::INC || BaseOpc == X86ISD::DEC) {
2386 ResultReg = createResultReg(TLI.getRegClassFor(VT));
2387 bool IsDec = BaseOpc == X86ISD::DEC;
2388 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2389 TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
2390 .addReg(LHSReg, getKillRegState(LHSIsKill));
2391 } else
2392 ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
2393 CI->getZExtValue());
2394 }
2395
2396 unsigned RHSReg;
2397 bool RHSIsKill;
2398 if (!ResultReg) {
2399 RHSReg = getRegForValue(RHS);
2400 if (RHSReg == 0)
2401 return false;
2402 RHSIsKill = hasTrivialKill(RHS);
2403 ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
2404 RHSIsKill);
2405 }
2406
2407 // FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit
2408 // it manually.
2409 if (BaseOpc == X86ISD::UMUL && !ResultReg) {
2410 static const unsigned MULOpc[] =
2411 { X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r };
2412 static const unsigned Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX };
2413 // First copy the first operand into RAX, which is an implicit input to
2414 // the X86::MUL*r instruction.
2415 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2416 TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])
2417 .addReg(LHSReg, getKillRegState(LHSIsKill));
2418 ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
2419 TLI.getRegClassFor(VT), RHSReg, RHSIsKill);
2420 } else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
2421 static const unsigned MULOpc[] =
2422 { X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr };
2423 if (VT == MVT::i8) {
2424 // Copy the first operand into AL, which is an implicit input to the
2425 // X86::IMUL8r instruction.
2426 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2427 TII.get(TargetOpcode::COPY), X86::AL)
2428 .addReg(LHSReg, getKillRegState(LHSIsKill));
2429 ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
2430 RHSIsKill);
2431 } else
2432 ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
2433 TLI.getRegClassFor(VT), LHSReg, LHSIsKill,
2434 RHSReg, RHSIsKill);
2435 }
2436
2437 if (!ResultReg)
2438 return false;
2439
2440 unsigned ResultReg2 = FuncInfo.CreateRegs(CondTy);
2441 assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers.");
2442 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CondOpc),
2443 ResultReg2);
2444
2445 updateValueMap(II, ResultReg, 2);
2446 return true;
2447 }
2448 case Intrinsic::x86_sse_cvttss2si:
2449 case Intrinsic::x86_sse_cvttss2si64:
2450 case Intrinsic::x86_sse2_cvttsd2si:
2451 case Intrinsic::x86_sse2_cvttsd2si64: {
2452 bool IsInputDouble;
2453 switch (II->getIntrinsicID()) {
2454 default: llvm_unreachable("Unexpected intrinsic.");
2455 case Intrinsic::x86_sse_cvttss2si:
2456 case Intrinsic::x86_sse_cvttss2si64:
2457 if (!Subtarget->hasSSE1())
2458 return false;
2459 IsInputDouble = false;
2460 break;
2461 case Intrinsic::x86_sse2_cvttsd2si:
2462 case Intrinsic::x86_sse2_cvttsd2si64:
2463 if (!Subtarget->hasSSE2())
2464 return false;
2465 IsInputDouble = true;
2466 break;
2467 }
2468
2469 Type *RetTy = II->getCalledFunction()->getReturnType();
2470 MVT VT;
2471 if (!isTypeLegal(RetTy, VT))
2472 return false;
2473
2474 static const unsigned CvtOpc[2][2][2] = {
2475 { { X86::CVTTSS2SIrr, X86::VCVTTSS2SIrr },
2476 { X86::CVTTSS2SI64rr, X86::VCVTTSS2SI64rr } },
2477 { { X86::CVTTSD2SIrr, X86::VCVTTSD2SIrr },
2478 { X86::CVTTSD2SI64rr, X86::VCVTTSD2SI64rr } }
2479 };
2480 bool HasAVX = Subtarget->hasAVX();
2481 unsigned Opc;
2482 switch (VT.SimpleTy) {
2483 default: llvm_unreachable("Unexpected result type.");
2484 case MVT::i32: Opc = CvtOpc[IsInputDouble][0][HasAVX]; break;
2485 case MVT::i64: Opc = CvtOpc[IsInputDouble][1][HasAVX]; break;
2486 }
2487
2488 // Check if we can fold insertelement instructions into the convert.
2489 const Value *Op = II->getArgOperand(0);
2490 while (auto *IE = dyn_cast<InsertElementInst>(Op)) {
2491 const Value *Index = IE->getOperand(2);
2492 if (!isa<ConstantInt>(Index))
2493 break;
2494 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
2495
2496 if (Idx == 0) {
2497 Op = IE->getOperand(1);
2498 break;
2499 }
2500 Op = IE->getOperand(0);
2501 }
2502
2503 unsigned Reg = getRegForValue(Op);
2504 if (Reg == 0)
2505 return false;
2506
2507 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
2508 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
2509 .addReg(Reg);
2510
2511 updateValueMap(II, ResultReg);
2512 return true;
2513 }
2514 }
2515}
2516
2517bool X86FastISel::fastLowerArguments() {
2518 if (!FuncInfo.CanLowerReturn)
2519 return false;
2520
2521 const Function *F = FuncInfo.Fn;
2522 if (F->isVarArg())
2523 return false;
2524
2525 CallingConv::ID CC = F->getCallingConv();
2526 if (CC != CallingConv::C)
2527 return false;
2528
2529 if (Subtarget->isCallingConvWin64(CC))
2530 return false;
2531
2532 if (!Subtarget->is64Bit())
2533 return false;
2534
2535 // Only handle simple cases, i.e. up to 6 i32/i64 and 8 f32/f64 scalar args.
2536 unsigned GPRCnt = 0;
2537 unsigned FPRCnt = 0;
2538 unsigned Idx = 0;
2539 for (auto const &Arg : F->args()) {
2540 // The first argument is at index 1.
2541 ++Idx;
2542 if (F->getAttributes().hasAttribute(Idx, Attribute::ByVal) ||
2543 F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
2544 F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
2545 F->getAttributes().hasAttribute(Idx, Attribute::Nest))
2546 return false;
2547
2548 Type *ArgTy = Arg.getType();
2549 if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
2550 return false;
2551
2552 EVT ArgVT = TLI.getValueType(ArgTy);
2553 if (!ArgVT.isSimple()) return false;
2554 switch (ArgVT.getSimpleVT().SimpleTy) {
2555 default: return false;
2556 case MVT::i32:
2557 case MVT::i64:
2558 ++GPRCnt;
2559 break;
2560 case MVT::f32:
2561 case MVT::f64:
2562 if (!Subtarget->hasSSE1())
2563 return false;
2564 ++FPRCnt;
2565 break;
2566 }
2567
2568 if (GPRCnt > 6)
2569 return false;
2570
2571 if (FPRCnt > 8)
2572 return false;
2573 }
2574
2575 static const MCPhysReg GPR32ArgRegs[] = {
2576 X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
2577 };
2578 static const MCPhysReg GPR64ArgRegs[] = {
2579 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9
2580 };
2581 static const MCPhysReg XMMArgRegs[] = {
2582 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2583 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2584 };
2585
2586 unsigned GPRIdx = 0;
2587 unsigned FPRIdx = 0;
2588 for (auto const &Arg : F->args()) {
2589 MVT VT = TLI.getSimpleValueType(Arg.getType());
2590 const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
2591 unsigned SrcReg;
2592 switch (VT.SimpleTy) {
2593 default: llvm_unreachable("Unexpected value type.");
2594 case MVT::i32: SrcReg = GPR32ArgRegs[GPRIdx++]; break;
2595 case MVT::i64: SrcReg = GPR64ArgRegs[GPRIdx++]; break;
2596 case MVT::f32: // fall-through
2597 case MVT::f64: SrcReg = XMMArgRegs[FPRIdx++]; break;
2598 }
2599 unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
2600 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
2601 // Without this, EmitLiveInCopies may eliminate the livein if its only
2602 // use is a bitcast (which isn't turned into an instruction).
2603 unsigned ResultReg = createResultReg(RC);
2604 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2605 TII.get(TargetOpcode::COPY), ResultReg)
2606 .addReg(DstReg, getKillRegState(true));
2607 updateValueMap(&Arg, ResultReg);
2608 }
2609 return true;
2610}
2611
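// Compute the number of bytes the callee pops off the stack, for use by the
// CALLSEQ_END below: on 32-bit, non-MSVCRT targets a callee taking a hidden
// sret pointer pops that pointer (4 bytes) on return.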
2612static unsigned computeBytesPoppedByCallee(const X86Subtarget *Subtarget,
2613 CallingConv::ID CC,
2614 ImmutableCallSite *CS) {
2615 if (Subtarget->is64Bit())
2616 return 0;
2617 if (Subtarget->getTargetTriple().isOSMSVCRT())
2618 return 0;
2619 if (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2620 CC == CallingConv::HiPE)
2621 return 0;
2622 if (CS && !CS->paramHasAttr(1, Attribute::StructRet))
2623 return 0;
2624 if (CS && CS->paramHasAttr(1, Attribute::InReg))
2625 return 0;
2626 return 4;
2627}
2628
2629bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
2630 auto &OutVals = CLI.OutVals;
2631 auto &OutFlags = CLI.OutFlags;
2632 auto &OutRegs = CLI.OutRegs;
2633 auto &Ins = CLI.Ins;
2634 auto &InRegs = CLI.InRegs;
2635 CallingConv::ID CC = CLI.CallConv;
2636 bool &IsTailCall = CLI.IsTailCall;
2637 bool IsVarArg = CLI.IsVarArg;
2638 const Value *Callee = CLI.Callee;
2639 const char *SymName = CLI.SymName;
2640
2641 bool Is64Bit = Subtarget->is64Bit();
2642 bool IsWin64 = Subtarget->isCallingConvWin64(CC);
2643
2644 // Handle only C, fastcc, and webkit_js calling conventions for now.
2645 switch (CC) {
2646 default: return false;
2647 case CallingConv::C:
2648 case CallingConv::Fast:
2649 case CallingConv::WebKit_JS:
2650 case CallingConv::X86_FastCall:
2651 case CallingConv::X86_64_Win64:
2652 case CallingConv::X86_64_SysV:
2653 break;
2654 }
2655
2656 // Allow SelectionDAG isel to handle tail calls.
2657 if (IsTailCall)
2658 return false;
2659
2660 // fastcc with -tailcallopt is intended to provide a guaranteed
2661 // tail call optimization. Fastisel doesn't know how to do that.
2662 if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)
2663 return false;
2664
2665 // Don't know how to handle Win64 varargs yet. Nothing special needed for
2666 // x86-32. Special handling for x86-64 is implemented.
2667 if (IsVarArg && IsWin64)
2668 return false;
2669
2670 // Don't know about inalloca yet.
2671 if (CLI.CS && CLI.CS->hasInAllocaArgument())
2672 return false;
2673
2674 // Fast-isel doesn't know about callee-pop yet.
2675 if (X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg,
2676 TM.Options.GuaranteedTailCallOpt))
2677 return false;
2678
2679 SmallVector<MVT, 16> OutVTs;
2680 SmallVector<unsigned, 16> ArgRegs;
2681
2682 // If this is a constant i1/i8/i16 argument, promote to i32 to avoid an extra
2683 // instruction. This is safe because it is common to all FastISel supported
2684 // calling conventions on x86.
2685 for (int i = 0, e = OutVals.size(); i != e; ++i) {
2686 Value *&Val = OutVals[i];
2687 ISD::ArgFlagsTy Flags = OutFlags[i];
2688 if (auto *CI = dyn_cast<ConstantInt>(Val)) {
2689 if (CI->getBitWidth() < 32) {
2690 if (Flags.isSExt())
2691 Val = ConstantExpr::getSExt(CI, Type::getInt32Ty(CI->getContext()));
2692 else
2693 Val = ConstantExpr::getZExt(CI, Type::getInt32Ty(CI->getContext()));
2694 }
2695 }
2696
2697 // Passing bools around ends up doing a trunc to i1 and passing it.
2698 // Codegen this as an argument + "and 1".
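    // For example, for
    //   %c = trunc i8 %x to i1
    //   call void @g(i1 %c)
    // we pass '%x & 1' instead of separately materializing the truncation.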
2699 MVT VT;
2700 auto *TI = dyn_cast<TruncInst>(Val);
2701 unsigned ResultReg;
2702 if (TI && TI->getType()->isIntegerTy(1) && CLI.CS &&
2703 (TI->getParent() == CLI.CS->getInstruction()->getParent()) &&
2704 TI->hasOneUse()) {
2705 Value *PrevVal = TI->getOperand(0);
2706 ResultReg = getRegForValue(PrevVal);
2707
2708 if (!ResultReg)
2709 return false;
2710
2711 if (!isTypeLegal(PrevVal->getType(), VT))
2712 return false;
2713
2714 ResultReg =
2715 fastEmit_ri(VT, VT, ISD::AND, ResultReg, hasTrivialKill(PrevVal), 1);
2716 } else {
2717 if (!isTypeLegal(Val->getType(), VT))
2718 return false;
2719 ResultReg = getRegForValue(Val);
2720 }
2721
2722 if (!ResultReg)
2723 return false;
2724
2725 ArgRegs.push_back(ResultReg);
2726 OutVTs.push_back(VT);
2727 }
2728
2729 // Analyze operands of the call, assigning locations to each operand.
2730 SmallVector<CCValAssign, 16> ArgLocs;
2731 CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, CLI.RetTy->getContext());
2732
2733 // Allocate shadow area for Win64
2734 if (IsWin64)
2735 CCInfo.AllocateStack(32, 8);
2736
2737 CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86);
2738
2739 // Get a count of how many bytes are to be pushed on the stack.
2740 unsigned NumBytes = CCInfo.getNextStackOffset();
2741
2742 // Issue CALLSEQ_START
2743 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
2744 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
2745 .addImm(NumBytes).addImm(0);
2746
2747 // Walk the register/memloc assignments, inserting copies/loads.
2748 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2749 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2750 CCValAssign const &VA = ArgLocs[i];
2751 const Value *ArgVal = OutVals[VA.getValNo()];
2752 MVT ArgVT = OutVTs[VA.getValNo()];
2753
2754 if (ArgVT == MVT::x86mmx)
2755 return false;
2756
2757 unsigned ArgReg = ArgRegs[VA.getValNo()];
2758
2759 // Promote the value if needed.
2760 switch (VA.getLocInfo()) {
2761 case CCValAssign::Full: break;
2762 case CCValAssign::SExt: {
2763 assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
2764 "Unexpected extend");
2765 bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
2766 ArgVT, ArgReg);
2767 assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
2768 ArgVT = VA.getLocVT();
2769 break;
2770 }
2771 case CCValAssign::ZExt: {
2772 assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
2773 "Unexpected extend");
2774 bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
2775 ArgVT, ArgReg);
2776 assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
2777 ArgVT = VA.getLocVT();
2778 break;
2779 }
2780 case CCValAssign::AExt: {
2781 assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
2782 "Unexpected extend");
2783 bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(), ArgReg,
2784 ArgVT, ArgReg);
2785 if (!Emitted)
2786 Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
2787 ArgVT, ArgReg);
2788 if (!Emitted)
2789 Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
2790 ArgVT, ArgReg);
2791
2792 assert(Emitted && "Failed to emit an aext!"); (void)Emitted;
2793 ArgVT = VA.getLocVT();
2794 break;
2795 }
2796 case CCValAssign::BCvt: {
2797 ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,
2798 /*TODO: Kill=*/false);
2799 assert(ArgReg && "Failed to emit a bitcast!");
2800 ArgVT = VA.getLocVT();
2801 break;
2802 }
2803 case CCValAssign::VExt:
2804 // VExt has not been implemented, so this should be impossible to reach
2805 // for now. However, fallback to Selection DAG isel once implemented.
2806 return false;
2807 case CCValAssign::AExtUpper:
2808 case CCValAssign::SExtUpper:
2809 case CCValAssign::ZExtUpper:
2810 case CCValAssign::FPExt:
2811 llvm_unreachable("Unexpected loc info!");
2812 case CCValAssign::Indirect:
2813 // FIXME: Indirect doesn't need extending, but fast-isel doesn't fully
2814 // support this.
2815 return false;
2816 }
2817
2818 if (VA.isRegLoc()) {
2819 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2820 TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
2821 OutRegs.push_back(VA.getLocReg());
2822 } else {
2823 assert(VA.isMemLoc());
2824
2825 // Don't emit stores for undef values.
2826 if (isa<UndefValue>(ArgVal))
2827 continue;
2828
2829 unsigned LocMemOffset = VA.getLocMemOffset();
2830 X86AddressMode AM;
2831 AM.Base.Reg = RegInfo->getStackRegister();
2832 AM.Disp = LocMemOffset;
2833 ISD::ArgFlagsTy Flags = OutFlags[VA.getValNo()];
2834 unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType());
2835 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
2836 MachinePointerInfo::getStack(LocMemOffset), MachineMemOperand::MOStore,
2837 ArgVT.getStoreSize(), Alignment);
2838 if (Flags.isByVal()) {
2839 X86AddressMode SrcAM;
2840 SrcAM.Base.Reg = ArgReg;
2841 if (!TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize()))
2842 return false;
2843 } else if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal)) {
2844 // If this is a really simple value, emit this with the Value* version
2845 // of X86FastEmitStore. If it isn't simple, we don't want to do this,
2846 // as it can cause us to reevaluate the argument.
2847 if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO))
2848 return false;
2849 } else {
2850 bool ValIsKill = hasTrivialKill(ArgVal);
2851 if (!X86FastEmitStore(ArgVT, ArgReg, ValIsKill, AM, MMO))
2852 return false;
2853 }
2854 }
2855 }
2856
2857 // ELF / PIC requires the GOT pointer to be in the EBX register before
2858 // function calls made via the PLT.
2859 if (Subtarget->isPICStyleGOT()) {
2860 unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
2861 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2862 TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base);
2863 }
2864
2865 if (Is64Bit && IsVarArg && !IsWin64) {
2866 // From AMD64 ABI document:
2867 // For calls that may call functions that use varargs or stdargs
2868 // (prototype-less calls or calls to functions containing ellipsis (...) in
2869 // the declaration) %al is used as a hidden argument to specify the number
2870 // of SSE registers used. The contents of %al do not need to match exactly
2871 // the number of registers, but must be an upper bound on the number of SSE
2872 // registers used and is in the range 0 - 8 inclusive.
2873
2874 // Count the number of XMM registers allocated.
2875 static const MCPhysReg XMMArgRegs[] = {
2876 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2877 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2878 };
2879 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
2880 assert((Subtarget->hasSSE1() || !NumXMMRegs)
2881 && "SSE registers cannot be used when SSE is disabled");
2882 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),
2883 X86::AL).addImm(NumXMMRegs);
2884 }
2885
2886 // Materialize callee address in a register. FIXME: GV address can be
2887 // handled with a CALLpcrel32 instead.
2888 X86AddressMode CalleeAM;
2889 if (!X86SelectCallAddress(Callee, CalleeAM))
2890 return false;
2891
2892 unsigned CalleeOp = 0;
2893 const GlobalValue *GV = nullptr;
2894 if (CalleeAM.GV != nullptr) {
2895 GV = CalleeAM.GV;
2896 } else if (CalleeAM.Base.Reg != 0) {
2897 CalleeOp = CalleeAM.Base.Reg;
2898 } else
2899 return false;
2900
2901 // Issue the call.
2902 MachineInstrBuilder MIB;
2903 if (CalleeOp) {
2904 // Register-indirect call.
2905 unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r;
2906 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc))
2907 .addReg(CalleeOp);
2908 } else {
2909 // Direct call.
2910 assert(GV && "Not a direct call");
2911 unsigned CallOpc = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;
2912
2913 // See if we need any target-specific flags on the GV operand.
2914 unsigned char OpFlags = 0;
2915
2916 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
2917 // external symbols must go through the PLT in PIC mode. If the symbol
2918 // has hidden or protected visibility, or if it is static or local, then
2919 // we don't need to use the PLT - we can directly call it.
2920 if (Subtarget->isTargetELF() &&
2921 TM.getRelocationModel() == Reloc::PIC_ &&
2922 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
2923 OpFlags = X86II::MO_PLT;
2924 } else if (Subtarget->isPICStyleStubAny() &&
2925 (GV->isDeclaration() || GV->isWeakForLinker()) &&
2926 (!Subtarget->getTargetTriple().isMacOSX() ||
2927 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
2928 // PC-relative references to external symbols should go through $stub,
2929 // unless we're building with the leopard linker or later, which
2930 // automatically synthesizes these stubs.
2931 OpFlags = X86II::MO_DARWIN_STUB;
2932 }
2933
2934 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc));
2935 if (SymName)
2936 MIB.addExternalSymbol(SymName, OpFlags);
2937 else
2938 MIB.addGlobalAddress(GV, 0, OpFlags);
2939 }
2940
2941 // Add a register mask operand representing the call-preserved registers.
2942 // Proper defs for return values will be added by setPhysRegsDeadExcept().
2943 MIB.addRegMask(TRI.getCallPreservedMask(CC));
2944
2945 // Add an implicit use GOT pointer in EBX.
2946 if (Subtarget->isPICStyleGOT())
2947 MIB.addReg(X86::EBX, RegState::Implicit);
2948
2949 if (Is64Bit && IsVarArg && !IsWin64)
2950 MIB.addReg(X86::AL, RegState::Implicit);
2951
2952 // Add implicit physical register uses to the call.
2953 for (auto Reg : OutRegs)
2954 MIB.addReg(Reg, RegState::Implicit);
2955
2956 // Issue CALLSEQ_END
2957 unsigned NumBytesForCalleeToPop =
2958 computeBytesPoppedByCallee(Subtarget, CC, CLI.CS);
2959 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
2960 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
2961 .addImm(NumBytes).addImm(NumBytesForCalleeToPop);
2962
2963 // Now handle call return values.
2964 SmallVector<CCValAssign, 16> RVLocs;
2965 CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs,
2966 CLI.RetTy->getContext());
2967 CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
2968
2969 // Copy all of the result registers out of their specified physreg.
2970 unsigned ResultReg = FuncInfo.CreateRegs(CLI.RetTy);
2971 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2972 CCValAssign &VA = RVLocs[i];
2973 EVT CopyVT = VA.getValVT();
2974 unsigned CopyReg = ResultReg + i;
2975
2976 // If this is x86-64, and we disabled SSE, we can't return FP values
2977 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
2978 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2979 report_fatal_error("SSE register return with SSE disabled");
2980 }
2981
2982 // If we prefer to use the value in xmm registers, copy it out as f80 and
2983 // use a truncate to move it from fp stack reg to xmm reg.
2984 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2985 isScalarFPTypeInSSEReg(VA.getValVT())) {
2986 CopyVT = MVT::f80;
2987 CopyReg = createResultReg(&X86::RFP80RegClass);
2988 }
2989
2990 // Copy out the result.
2991 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2992 TII.get(TargetOpcode::COPY), CopyReg).addReg(VA.getLocReg());
2993 InRegs.push_back(VA.getLocReg());
2994
2995 // Round the f80 to the right size, which also moves it to the appropriate
2996 // xmm register. This is accomplished by storing the f80 value in memory
2997 // and then loading it back.
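    // For an f32 result this looks like:
    //   ST_Fp80m32 <stack slot>, %copy   ; the store rounds f80 down to f32
    //   %result = MOVSSrm <stack slot>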
2998 if (CopyVT != VA.getValVT()) {
2999 EVT ResVT = VA.getValVT();
3000 unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
3001 unsigned MemSize = ResVT.getSizeInBits()/8;
3002 int FI = MFI.CreateStackObject(MemSize, MemSize, false);
3003 addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3004 TII.get(Opc)), FI)
3005 .addReg(CopyReg);
3006 Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
3007 addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3008 TII.get(Opc), ResultReg + i), FI);
3009 }
3010 }
3011
3012 CLI.ResultReg = ResultReg;
3013 CLI.NumResultRegs = RVLocs.size();
3014 CLI.Call = MIB;
3015
3016 return true;
3017}
3018
3019bool
3020X86FastISel::fastSelectInstruction(const Instruction *I) {
3021 switch (I->getOpcode()) {
3022 default: break;
3023 case Instruction::Load:
3024 return X86SelectLoad(I);
3025 case Instruction::Store:
3026 return X86SelectStore(I);
3027 case Instruction::Ret:
3028 return X86SelectRet(I);
3029 case Instruction::ICmp:
3030 case Instruction::FCmp:
3031 return X86SelectCmp(I);
3032 case Instruction::ZExt:
3033 return X86SelectZExt(I);
3034 case Instruction::Br:
3035 return X86SelectBranch(I);
3036 case Instruction::LShr:
3037 case Instruction::AShr:
3038 case Instruction::Shl:
3039 return X86SelectShift(I);
3040 case Instruction::SDiv:
3041 case Instruction::UDiv:
3042 case Instruction::SRem:
3043 case Instruction::URem:
3044 return X86SelectDivRem(I);
3045 case Instruction::Select:
3046 return X86SelectSelect(I);
3047 case Instruction::Trunc:
3048 return X86SelectTrunc(I);
3049 case Instruction::FPExt:
3050 return X86SelectFPExt(I);
3051 case Instruction::FPTrunc:
3052 return X86SelectFPTrunc(I);
3053 case Instruction::IntToPtr: // Deliberate fall-through.
3054 case Instruction::PtrToInt: {
3055 EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
3056 EVT DstVT = TLI.getValueType(I->getType());
3057 if (DstVT.bitsGT(SrcVT))
3058 return X86SelectZExt(I);
3059 if (DstVT.bitsLT(SrcVT))
3060 return X86SelectTrunc(I);
3061 unsigned Reg = getRegForValue(I->getOperand(0));
3062 if (Reg == 0) return false;
3063 updateValueMap(I, Reg);
3064 return true;
3065 }
3066 }
3067
3068 return false;
3069}
3070
3071unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
3072 if (VT > MVT::i64)
3073 return 0;
3074
3075 uint64_t Imm = CI->getZExtValue();
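  // Zero is materialized with MOV32r0 (a 32-bit xor) and then narrowed or
  // widened to the requested width; since writing a 32-bit register clears
  // the upper 32 bits, a SUBREG_TO_REG suffices for i64.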
3076 if (Imm == 0) {
3077 unsigned SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
3078 switch (VT.SimpleTy) {
3079 default: llvm_unreachable("Unexpected value type");
3080 case MVT::i1:
3081 case MVT::i8:
3082 return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true,
3083 X86::sub_8bit);
3084 case MVT::i16:
3085 return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true,
3086 X86::sub_16bit);
3087 case MVT::i32:
3088 return SrcReg;
3089 case MVT::i64: {
3090 unsigned ResultReg = createResultReg(&X86::GR64RegClass);
3091 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3092 TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
3093 .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
3094 return ResultReg;
3095 }
3096 }
3097 }
3098
3099 unsigned Opc = 0;
3100 switch (VT.SimpleTy) {
3101 default: llvm_unreachable("Unexpected value type");
3102 case MVT::i1: VT = MVT::i8; // fall-through
3103 case MVT::i8: Opc = X86::MOV8ri; break;
3104 case MVT::i16: Opc = X86::MOV16ri; break;
3105 case MVT::i32: Opc = X86::MOV32ri; break;
3106 case MVT::i64: {
3107 if (isUInt<32>(Imm))
3108 Opc = X86::MOV32ri;
3109 else if (isInt<32>(Imm))
3110 Opc = X86::MOV64ri32;
3111 else
3112 Opc = X86::MOV64ri;
3113 break;
3114 }
3115 }
  if (VT == MVT::i64 && Opc == X86::MOV32ri) {
    unsigned SrcReg = fastEmitInst_i(Opc, &X86::GR32RegClass, Imm);
    unsigned ResultReg = createResultReg(&X86::GR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
      .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
    return ResultReg;
  }
  return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
}

unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
  if (CFP->isNullValue())
    return fastMaterializeFloatZero(CFP);

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return 0;

  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  switch (VT.SimpleTy) {
  default: return 0;
  case MVT::f32:
    if (X86ScalarSSEf32) {
      Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
      RC = &X86::FR32RegClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC = &X86::RFP32RegClass;
    }
    break;
  case MVT::f64:
    if (X86ScalarSSEf64) {
      Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
      RC = &X86::FR64RegClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC = &X86::RFP64RegClass;
    }
    break;
  case MVT::f80:
    // No f80 support yet.
    return 0;
  }

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // Alignment of vector types. FIXME!
    Align = DL.getTypeAllocSize(CFP->getType());
  }

  // x86-32 PIC requires a PIC base register for constant pools.
  unsigned PICBase = 0;
  unsigned char OpFlag = 0;
  if (Subtarget->isPICStyleStubPIC()) { // Not dynamic-no-pic
    OpFlag = X86II::MO_PIC_BASE_OFFSET;
    PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
  } else if (Subtarget->isPICStyleGOT()) {
    OpFlag = X86II::MO_GOTOFF;
    PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
  } else if (Subtarget->isPICStyleRIPRel() &&
             TM.getCodeModel() == CodeModel::Small) {
    PICBase = X86::RIP;
  }

  // Create the load from the constant pool.
  unsigned CPI = MCP.getConstantPoolIndex(CFP, Align);
  unsigned ResultReg = createResultReg(RC);

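  // Large code model: the constant pool may not be reachable with a 32-bit
  // displacement, so materialize its address into a register with MOV64ri
  // and load through that register.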
  if (CM == CodeModel::Large) {
    unsigned AddrReg = createResultReg(&X86::GR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),
            AddrReg)
      .addConstantPoolIndex(CPI, 0, OpFlag);
    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                      TII.get(Opc), ResultReg);
    addDirectMem(MIB, AddrReg);
    MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getConstantPool(), MachineMemOperand::MOLoad,
        TM.getDataLayout()->getPointerSize(), Align);
    MIB->addMemOperand(*FuncInfo.MF, MMO);
    return ResultReg;
  }

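  // Small code model: a single load with a constant-pool reference (PIC-base
  // relative or RIP-relative, as selected above) is sufficient.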
3203
3204 addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3205 TII.get(Opc), ResultReg),
3206 CPI, PICBase, OpFlag);
3207 return ResultReg;
3208}
3209
3210unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
3211 // Can't handle alternate code models yet.
3212 if (TM.getCodeModel() != CodeModel::Small)
3213 return 0;
3214
3215 // Materialize addresses with LEA/MOV instructions.
3216 X86AddressMode AM;
3217 if (X86SelectAddress(GV, AM)) {
3218 // If the expression is just a basereg, then we're done, otherwise we need
3219 // to emit an LEA.
3220 if (AM.BaseType == X86AddressMode::RegBase &&
3221 AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr)
3222 return AM.Base.Reg;
3223
3224 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
3225 if (TM.getRelocationModel() == Reloc::Static &&
3226 TLI.getPointerTy() == MVT::i64) {
      // The displacement could be more than 32 bits away, so we need to use
      // an instruction with a 64-bit immediate.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),
              ResultReg)
        .addGlobalAddress(GV);
    } else {
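      // Pointer-width LEA of the address: LEA64_32r for x32 (64-bit target
      // with 32-bit pointers), LEA32r for 32-bit targets, LEA64r otherwise.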
      unsigned Opc = TLI.getPointerTy() == MVT::i32
                     ? (Subtarget->isTarget64BitILP32()
                        ? X86::LEA64_32r : X86::LEA32r)
                     : X86::LEA64r;
      addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                             TII.get(Opc), ResultReg), AM);
    }
    return ResultReg;
  }
  return 0;
}

unsigned X86FastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple())
    return 0;
  MVT VT = CEVT.getSimpleVT();

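  // Dispatch on the constant kind; integer, floating-point, and global
  // address constants each have a dedicated materializer.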
  if (const auto *CI = dyn_cast<ConstantInt>(C))
    return X86MaterializeInt(CI, VT);
  else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return X86MaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return X86MaterializeGV(GV, VT);

  return 0;
}

unsigned X86FastISel::fastMaterializeAlloca(const AllocaInst *C) {
  // Fail on dynamic allocas. At this point, getRegForValue has already
  // checked its CSE maps, so if we're here trying to handle a dynamic
  // alloca, we're not going to succeed. X86SelectAddress has a
  // check for dynamic allocas, because it's called directly from
  // various places, but fastMaterializeAlloca also needs a check
  // in order to avoid recursion between getRegForValue,
  // X86SelectAddress, and fastMaterializeAlloca.
  if (!FuncInfo.StaticAllocaMap.count(C))
    return 0;
  assert(C->isStaticAlloca() && "dynamic alloca in the static alloca map?");

  X86AddressMode AM;
  if (!X86SelectAddress(C, AM))
    return 0;

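  // A static alloca lives at a fixed frame offset, so an LEA of that frame
  // address yields the pointer value.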
  unsigned Opc = TLI.getPointerTy() == MVT::i32
                 ? (Subtarget->isTarget64BitILP32()
                    ? X86::LEA64_32r : X86::LEA32r)
                 : X86::LEA64r;
  const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy());
  unsigned ResultReg = createResultReg(RC);
  addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                         TII.get(Opc), ResultReg), AM);
  return ResultReg;
}

unsigned X86FastISel::fastMaterializeFloatZero(const ConstantFP *CF) {
  MVT VT;
  if (!isTypeLegal(CF->getType(), VT))
    return 0;

  // Get opcode and regclass for the given zero.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  switch (VT.SimpleTy) {
  default: return 0;
  case MVT::f32:
    if (X86ScalarSSEf32) {
      Opc = X86::FsFLD0SS;
      RC = &X86::FR32RegClass;
    } else {
      Opc = X86::LD_Fp032;
      RC = &X86::RFP32RegClass;
    }
    break;
  case MVT::f64:
    if (X86ScalarSSEf64) {
      Opc = X86::FsFLD0SD;
      RC = &X86::FR64RegClass;
    } else {
      Opc = X86::LD_Fp064;
      RC = &X86::RFP64RegClass;
    }
    break;
  case MVT::f80:
    // No f80 support yet.
    return 0;
  }

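  // These opcodes take no source operands; they materialize +0.0 directly in
  // the destination register, so no constant-pool load is needed.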
  unsigned ResultReg = createResultReg(RC);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
  return ResultReg;
}


bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                      const LoadInst *LI) {
  const Value *Ptr = LI->getPointerOperand();
  X86AddressMode AM;
  if (!X86SelectAddress(Ptr, AM))
    return false;

  const X86InstrInfo &XII = (const X86InstrInfo &)TII;

  unsigned Size = DL.getTypeAllocSize(LI->getType());
  unsigned Alignment = LI->getAlignment();

  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = DL.getABITypeAlignment(LI->getType());

  SmallVector<MachineOperand, 8> AddrOps;
  AM.getFullAddress(AddrOps);

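  // Ask the target to rewrite MI so that operand OpNo is read directly from
  // the load's address; commuting the instruction is allowed if that makes
  // the fold possible.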
  MachineInstr *Result =
      XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps,
                                Size, Alignment, /*AllowCommute=*/true);
  if (!Result)
    return false;

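  // The folded instruction replaces MI: give it the load's memory operand,
  // insert it at the current insertion point, and erase the original.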
  Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
  FuncInfo.MBB->insert(FuncInfo.InsertPt, Result);
  MI->eraseFromParent();
  return true;
}


namespace llvm {
  FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo,
                                const TargetLibraryInfo *libInfo) {
    return new X86FastISel(funcInfo, libInfo);
  }
}