blob: 8db4432734d76626483c439e9ed04feba4a90af2 [file] [log] [blame]
Bill Schmidt646cd792013-07-30 00:50:39 +00001//===-- PPCFastISel.cpp - PowerPC FastISel implementation -----------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the PowerPC-specific support for the FastISel class. Some
11// of the target-specific code is generated by tablegen in the file
12// PPCGenFastISel.inc, which is #included here.
13//
14//===----------------------------------------------------------------------===//
15
16#define DEBUG_TYPE "ppcfastisel"
17#include "PPC.h"
18#include "PPCISelLowering.h"
19#include "PPCSubtarget.h"
20#include "PPCTargetMachine.h"
21#include "MCTargetDesc/PPCPredicates.h"
22#include "llvm/ADT/Optional.h"
23#include "llvm/CodeGen/CallingConvLower.h"
24#include "llvm/CodeGen/FastISel.h"
25#include "llvm/CodeGen/FunctionLoweringInfo.h"
26#include "llvm/CodeGen/MachineConstantPool.h"
27#include "llvm/CodeGen/MachineFrameInfo.h"
28#include "llvm/CodeGen/MachineInstrBuilder.h"
29#include "llvm/CodeGen/MachineRegisterInfo.h"
30#include "llvm/IR/CallingConv.h"
31#include "llvm/IR/GlobalAlias.h"
32#include "llvm/IR/GlobalVariable.h"
33#include "llvm/IR/IntrinsicInst.h"
34#include "llvm/IR/Operator.h"
35#include "llvm/Support/Debug.h"
36#include "llvm/Support/GetElementPtrTypeIterator.h"
37#include "llvm/Target/TargetLowering.h"
38#include "llvm/Target/TargetMachine.h"
39
40using namespace llvm;
41
42namespace {
43
// Describes a fast-isel memory operand: a base (either a register or a
// frame index, discriminated by BaseType) plus a signed byte offset.
// Plain 'struct' — the C-style 'typedef struct' idiom is unnecessary in C++.
struct Address {
  enum {
    RegBase,        // Base.Reg holds the base register.
    FrameIndexBase  // Base.FI holds the frame index.
  } BaseType;

  union {
    unsigned Reg;
    int FI;
  } Base;

  int Offset;

  // Innocuous defaults for our address.
  Address()
    : BaseType(RegBase), Offset(0) {
    Base.Reg = 0;
  }
};
63
64class PPCFastISel : public FastISel {
65
66 const TargetMachine &TM;
67 const TargetInstrInfo &TII;
68 const TargetLowering &TLI;
69 const PPCSubtarget &PPCSubTarget;
70 LLVMContext *Context;
71
72 public:
73 explicit PPCFastISel(FunctionLoweringInfo &FuncInfo,
74 const TargetLibraryInfo *LibInfo)
75 : FastISel(FuncInfo, LibInfo),
76 TM(FuncInfo.MF->getTarget()),
77 TII(*TM.getInstrInfo()),
78 TLI(*TM.getTargetLowering()),
79 PPCSubTarget(
80 *((static_cast<const PPCTargetMachine *>(&TM))->getSubtargetImpl())
81 ),
82 Context(&FuncInfo.Fn->getContext()) { }
83
84 // Backend specific FastISel code.
85 private:
86 virtual bool TargetSelectInstruction(const Instruction *I);
87 virtual unsigned TargetMaterializeConstant(const Constant *C);
88 virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
89 virtual bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
90 const LoadInst *LI);
91 virtual bool FastLowerArguments();
Bill Schmidt3fad2bc2013-08-25 22:33:42 +000092 virtual unsigned FastEmit_i(MVT Ty, MVT RetTy, unsigned Opc, uint64_t Imm);
93
94 // Instruction selection routines.
95 private:
96 bool SelectBranch(const Instruction *I);
97 bool SelectIndirectBr(const Instruction *I);
Bill Schmidt055d2072013-08-26 19:42:51 +000098 bool SelectRet(const Instruction *I);
99 bool SelectIntExt(const Instruction *I);
Bill Schmidt646cd792013-07-30 00:50:39 +0000100
101 // Utility routines.
102 private:
Bill Schmidt3fad2bc2013-08-25 22:33:42 +0000103 bool PPCEmitCmp(const Value *Src1Value, const Value *Src2Value,
104 bool isZExt, unsigned DestReg);
105 bool PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
106 unsigned DestReg, bool IsZExt);
Bill Schmidt646cd792013-07-30 00:50:39 +0000107 unsigned PPCMaterializeFP(const ConstantFP *CFP, MVT VT);
108 unsigned PPCMaterializeInt(const Constant *C, MVT VT);
109 unsigned PPCMaterialize32BitInt(int64_t Imm,
110 const TargetRegisterClass *RC);
111 unsigned PPCMaterialize64BitInt(int64_t Imm,
112 const TargetRegisterClass *RC);
113
Bill Schmidt055d2072013-08-26 19:42:51 +0000114 // Call handling routines.
115 private:
116 CCAssignFn *usePPC32CCs(unsigned Flag);
117
Bill Schmidt646cd792013-07-30 00:50:39 +0000118 private:
119 #include "PPCGenFastISel.inc"
120
121};
122
123} // end anonymous namespace
124
Bill Schmidt055d2072013-08-26 19:42:51 +0000125#include "PPCGenCallingConv.inc"
126
127// Function whose sole purpose is to kill compiler warnings
128// stemming from unused functions included from PPCGenCallingConv.inc.
129CCAssignFn *PPCFastISel::usePPC32CCs(unsigned Flag) {
130 if (Flag == 1)
131 return CC_PPC32_SVR4;
132 else if (Flag == 2)
133 return CC_PPC32_SVR4_ByVal;
134 else if (Flag == 3)
135 return CC_PPC32_SVR4_VarArg;
136 else
137 return RetCC_PPC;
138}
139
Bill Schmidt3fad2bc2013-08-25 22:33:42 +0000140static Optional<PPC::Predicate> getComparePred(CmpInst::Predicate Pred) {
141 switch (Pred) {
142 // These are not representable with any single compare.
143 case CmpInst::FCMP_FALSE:
144 case CmpInst::FCMP_UEQ:
145 case CmpInst::FCMP_UGT:
146 case CmpInst::FCMP_UGE:
147 case CmpInst::FCMP_ULT:
148 case CmpInst::FCMP_ULE:
149 case CmpInst::FCMP_UNE:
150 case CmpInst::FCMP_TRUE:
151 default:
152 return Optional<PPC::Predicate>();
153
154 case CmpInst::FCMP_OEQ:
155 case CmpInst::ICMP_EQ:
156 return PPC::PRED_EQ;
157
158 case CmpInst::FCMP_OGT:
159 case CmpInst::ICMP_UGT:
160 case CmpInst::ICMP_SGT:
161 return PPC::PRED_GT;
162
163 case CmpInst::FCMP_OGE:
164 case CmpInst::ICMP_UGE:
165 case CmpInst::ICMP_SGE:
166 return PPC::PRED_GE;
167
168 case CmpInst::FCMP_OLT:
169 case CmpInst::ICMP_ULT:
170 case CmpInst::ICMP_SLT:
171 return PPC::PRED_LT;
172
173 case CmpInst::FCMP_OLE:
174 case CmpInst::ICMP_ULE:
175 case CmpInst::ICMP_SLE:
176 return PPC::PRED_LE;
177
178 case CmpInst::FCMP_ONE:
179 case CmpInst::ICMP_NE:
180 return PPC::PRED_NE;
181
182 case CmpInst::FCMP_ORD:
183 return PPC::PRED_NU;
184
185 case CmpInst::FCMP_UNO:
186 return PPC::PRED_UN;
187 }
188}
189
190// Attempt to fast-select a branch instruction.
191bool PPCFastISel::SelectBranch(const Instruction *I) {
192 const BranchInst *BI = cast<BranchInst>(I);
193 MachineBasicBlock *BrBB = FuncInfo.MBB;
194 MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
195 MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
196
197 // For now, just try the simplest case where it's fed by a compare.
198 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
199 Optional<PPC::Predicate> OptPPCPred = getComparePred(CI->getPredicate());
200 if (!OptPPCPred)
201 return false;
202
203 PPC::Predicate PPCPred = OptPPCPred.getValue();
204
205 // Take advantage of fall-through opportunities.
206 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
207 std::swap(TBB, FBB);
208 PPCPred = PPC::InvertPredicate(PPCPred);
209 }
210
211 unsigned CondReg = createResultReg(&PPC::CRRCRegClass);
212
213 if (!PPCEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned(),
214 CondReg))
215 return false;
216
217 BuildMI(*BrBB, FuncInfo.InsertPt, DL, TII.get(PPC::BCC))
218 .addImm(PPCPred).addReg(CondReg).addMBB(TBB);
219 FastEmitBranch(FBB, DL);
220 FuncInfo.MBB->addSuccessor(TBB);
221 return true;
222
223 } else if (const ConstantInt *CI =
224 dyn_cast<ConstantInt>(BI->getCondition())) {
225 uint64_t Imm = CI->getZExtValue();
226 MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
227 FastEmitBranch(Target, DL);
228 return true;
229 }
230
231 // FIXME: ARM looks for a case where the block containing the compare
232 // has been split from the block containing the branch. If this happens,
233 // there is a vreg available containing the result of the compare. I'm
234 // not sure we can do much, as we've lost the predicate information with
235 // the compare instruction -- we have a 4-bit CR but don't know which bit
236 // to test here.
237 return false;
238}
239
240// Attempt to emit a compare of the two source values. Signed and unsigned
241// comparisons are supported. Return false if we can't handle it.
242bool PPCFastISel::PPCEmitCmp(const Value *SrcValue1, const Value *SrcValue2,
243 bool IsZExt, unsigned DestReg) {
244 Type *Ty = SrcValue1->getType();
245 EVT SrcEVT = TLI.getValueType(Ty, true);
246 if (!SrcEVT.isSimple())
247 return false;
248 MVT SrcVT = SrcEVT.getSimpleVT();
249
250 // See if operand 2 is an immediate encodeable in the compare.
251 // FIXME: Operands are not in canonical order at -O0, so an immediate
252 // operand in position 1 is a lost opportunity for now. We are
253 // similar to ARM in this regard.
254 long Imm = 0;
255 bool UseImm = false;
256
257 // Only 16-bit integer constants can be represented in compares for
258 // PowerPC. Others will be materialized into a register.
259 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(SrcValue2)) {
260 if (SrcVT == MVT::i64 || SrcVT == MVT::i32 || SrcVT == MVT::i16 ||
261 SrcVT == MVT::i8 || SrcVT == MVT::i1) {
262 const APInt &CIVal = ConstInt->getValue();
263 Imm = (IsZExt) ? (long)CIVal.getZExtValue() : (long)CIVal.getSExtValue();
264 if ((IsZExt && isUInt<16>(Imm)) || (!IsZExt && isInt<16>(Imm)))
265 UseImm = true;
266 }
267 }
268
269 unsigned CmpOpc;
270 bool NeedsExt = false;
271 switch (SrcVT.SimpleTy) {
272 default: return false;
273 case MVT::f32:
274 CmpOpc = PPC::FCMPUS;
275 break;
276 case MVT::f64:
277 CmpOpc = PPC::FCMPUD;
278 break;
279 case MVT::i1:
280 case MVT::i8:
281 case MVT::i16:
282 NeedsExt = true;
283 // Intentional fall-through.
284 case MVT::i32:
285 if (!UseImm)
286 CmpOpc = IsZExt ? PPC::CMPLW : PPC::CMPW;
287 else
288 CmpOpc = IsZExt ? PPC::CMPLWI : PPC::CMPWI;
289 break;
290 case MVT::i64:
291 if (!UseImm)
292 CmpOpc = IsZExt ? PPC::CMPLD : PPC::CMPD;
293 else
294 CmpOpc = IsZExt ? PPC::CMPLDI : PPC::CMPDI;
295 break;
296 }
297
298 unsigned SrcReg1 = getRegForValue(SrcValue1);
299 if (SrcReg1 == 0)
300 return false;
301
302 unsigned SrcReg2 = 0;
303 if (!UseImm) {
304 SrcReg2 = getRegForValue(SrcValue2);
305 if (SrcReg2 == 0)
306 return false;
307 }
308
309 if (NeedsExt) {
310 unsigned ExtReg = createResultReg(&PPC::GPRCRegClass);
311 if (!PPCEmitIntExt(SrcVT, SrcReg1, MVT::i32, ExtReg, IsZExt))
312 return false;
313 SrcReg1 = ExtReg;
314
315 if (!UseImm) {
316 unsigned ExtReg = createResultReg(&PPC::GPRCRegClass);
317 if (!PPCEmitIntExt(SrcVT, SrcReg2, MVT::i32, ExtReg, IsZExt))
318 return false;
319 SrcReg2 = ExtReg;
320 }
321 }
322
323 if (!UseImm)
324 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc), DestReg)
325 .addReg(SrcReg1).addReg(SrcReg2);
326 else
327 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc), DestReg)
328 .addReg(SrcReg1).addImm(Imm);
329
330 return true;
331}
332
Bill Schmidt055d2072013-08-26 19:42:51 +0000333// Attempt to fast-select a return instruction.
334bool PPCFastISel::SelectRet(const Instruction *I) {
335
336 if (!FuncInfo.CanLowerReturn)
337 return false;
338
339 const ReturnInst *Ret = cast<ReturnInst>(I);
340 const Function &F = *I->getParent()->getParent();
341
342 // Build a list of return value registers.
343 SmallVector<unsigned, 4> RetRegs;
344 CallingConv::ID CC = F.getCallingConv();
345
346 if (Ret->getNumOperands() > 0) {
347 SmallVector<ISD::OutputArg, 4> Outs;
348 GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
349
350 // Analyze operands of the call, assigning locations to each operand.
351 SmallVector<CCValAssign, 16> ValLocs;
352 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs, *Context);
353 CCInfo.AnalyzeReturn(Outs, RetCC_PPC64_ELF_FIS);
354 const Value *RV = Ret->getOperand(0);
355
356 // FIXME: Only one output register for now.
357 if (ValLocs.size() > 1)
358 return false;
359
360 // Special case for returning a constant integer of any size.
361 // Materialize the constant as an i64 and copy it to the return
362 // register. This avoids an unnecessary extend or truncate.
363 if (isa<ConstantInt>(*RV)) {
364 const Constant *C = cast<Constant>(RV);
365 unsigned SrcReg = PPCMaterializeInt(C, MVT::i64);
366 unsigned RetReg = ValLocs[0].getLocReg();
367 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
368 RetReg).addReg(SrcReg);
369 RetRegs.push_back(RetReg);
370
371 } else {
372 unsigned Reg = getRegForValue(RV);
373
374 if (Reg == 0)
375 return false;
376
377 // Copy the result values into the output registers.
378 for (unsigned i = 0; i < ValLocs.size(); ++i) {
379
380 CCValAssign &VA = ValLocs[i];
381 assert(VA.isRegLoc() && "Can only return in registers!");
382 RetRegs.push_back(VA.getLocReg());
383 unsigned SrcReg = Reg + VA.getValNo();
384
385 EVT RVEVT = TLI.getValueType(RV->getType());
386 if (!RVEVT.isSimple())
387 return false;
388 MVT RVVT = RVEVT.getSimpleVT();
389 MVT DestVT = VA.getLocVT();
390
391 if (RVVT != DestVT && RVVT != MVT::i8 &&
392 RVVT != MVT::i16 && RVVT != MVT::i32)
393 return false;
394
395 if (RVVT != DestVT) {
396 switch (VA.getLocInfo()) {
397 default:
398 llvm_unreachable("Unknown loc info!");
399 case CCValAssign::Full:
400 llvm_unreachable("Full value assign but types don't match?");
401 case CCValAssign::AExt:
402 case CCValAssign::ZExt: {
403 const TargetRegisterClass *RC =
404 (DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
405 unsigned TmpReg = createResultReg(RC);
406 if (!PPCEmitIntExt(RVVT, SrcReg, DestVT, TmpReg, true))
407 return false;
408 SrcReg = TmpReg;
409 break;
410 }
411 case CCValAssign::SExt: {
412 const TargetRegisterClass *RC =
413 (DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
414 unsigned TmpReg = createResultReg(RC);
415 if (!PPCEmitIntExt(RVVT, SrcReg, DestVT, TmpReg, false))
416 return false;
417 SrcReg = TmpReg;
418 break;
419 }
420 }
421 }
422
423 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
424 TII.get(TargetOpcode::COPY), RetRegs[i])
425 .addReg(SrcReg);
426 }
427 }
428 }
429
430 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
431 TII.get(PPC::BLR));
432
433 for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
434 MIB.addReg(RetRegs[i], RegState::Implicit);
435
436 return true;
437}
438
Bill Schmidt3fad2bc2013-08-25 22:33:42 +0000439// Attempt to emit an integer extend of SrcReg into DestReg. Both
440// signed and zero extensions are supported. Return false if we
Bill Schmidt055d2072013-08-26 19:42:51 +0000441// can't handle it.
Bill Schmidt3fad2bc2013-08-25 22:33:42 +0000442bool PPCFastISel::PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
443 unsigned DestReg, bool IsZExt) {
Bill Schmidt055d2072013-08-26 19:42:51 +0000444 if (DestVT != MVT::i32 && DestVT != MVT::i64)
445 return false;
446 if (SrcVT != MVT::i8 && SrcVT != MVT::i16 && SrcVT != MVT::i32)
447 return false;
448
449 // Signed extensions use EXTSB, EXTSH, EXTSW.
450 if (!IsZExt) {
451 unsigned Opc;
452 if (SrcVT == MVT::i8)
453 Opc = (DestVT == MVT::i32) ? PPC::EXTSB : PPC::EXTSB8_32_64;
454 else if (SrcVT == MVT::i16)
455 Opc = (DestVT == MVT::i32) ? PPC::EXTSH : PPC::EXTSH8_32_64;
456 else {
457 assert(DestVT == MVT::i64 && "Signed extend from i32 to i32??");
458 Opc = PPC::EXTSW_32_64;
459 }
460 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
461 .addReg(SrcReg);
462
463 // Unsigned 32-bit extensions use RLWINM.
464 } else if (DestVT == MVT::i32) {
465 unsigned MB;
466 if (SrcVT == MVT::i8)
467 MB = 24;
468 else {
469 assert(SrcVT == MVT::i16 && "Unsigned extend from i32 to i32??");
470 MB = 16;
471 }
472 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::RLWINM),
473 DestReg)
474 .addReg(SrcReg).addImm(/*SH=*/0).addImm(MB).addImm(/*ME=*/31);
475
476 // Unsigned 64-bit extensions use RLDICL (with a 32-bit source).
477 } else {
478 unsigned MB;
479 if (SrcVT == MVT::i8)
480 MB = 56;
481 else if (SrcVT == MVT::i16)
482 MB = 48;
483 else
484 MB = 32;
485 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
486 TII.get(PPC::RLDICL_32_64), DestReg)
487 .addReg(SrcReg).addImm(/*SH=*/0).addImm(MB);
488 }
489
490 return true;
Bill Schmidt3fad2bc2013-08-25 22:33:42 +0000491}
492
493// Attempt to fast-select an indirect branch instruction.
494bool PPCFastISel::SelectIndirectBr(const Instruction *I) {
495 unsigned AddrReg = getRegForValue(I->getOperand(0));
496 if (AddrReg == 0)
497 return false;
498
499 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::MTCTR8))
500 .addReg(AddrReg);
501 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::BCTR8));
502
503 const IndirectBrInst *IB = cast<IndirectBrInst>(I);
504 for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
505 FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[IB->getSuccessor(i)]);
506
507 return true;
508}
509
Bill Schmidt055d2072013-08-26 19:42:51 +0000510// Attempt to fast-select an integer extend instruction.
511bool PPCFastISel::SelectIntExt(const Instruction *I) {
512 Type *DestTy = I->getType();
513 Value *Src = I->getOperand(0);
514 Type *SrcTy = Src->getType();
515
516 bool IsZExt = isa<ZExtInst>(I);
517 unsigned SrcReg = getRegForValue(Src);
518 if (!SrcReg) return false;
519
520 EVT SrcEVT, DestEVT;
521 SrcEVT = TLI.getValueType(SrcTy, true);
522 DestEVT = TLI.getValueType(DestTy, true);
523 if (!SrcEVT.isSimple())
524 return false;
525 if (!DestEVT.isSimple())
526 return false;
527
528 MVT SrcVT = SrcEVT.getSimpleVT();
529 MVT DestVT = DestEVT.getSimpleVT();
530
531 // If we know the register class needed for the result of this
532 // instruction, use it. Otherwise pick the register class of the
533 // correct size that does not contain X0/R0, since we don't know
534 // whether downstream uses permit that assignment.
535 unsigned AssignedReg = FuncInfo.ValueMap[I];
536 const TargetRegisterClass *RC =
537 (AssignedReg ? MRI.getRegClass(AssignedReg) :
538 (DestVT == MVT::i64 ? &PPC::G8RC_and_G8RC_NOX0RegClass :
539 &PPC::GPRC_and_GPRC_NOR0RegClass));
540 unsigned ResultReg = createResultReg(RC);
541
542 if (!PPCEmitIntExt(SrcVT, SrcReg, DestVT, ResultReg, IsZExt))
543 return false;
544
545 UpdateValueMap(I, ResultReg);
546 return true;
547}
548
Bill Schmidt646cd792013-07-30 00:50:39 +0000549// Attempt to fast-select an instruction that wasn't handled by
Bill Schmidt3fad2bc2013-08-25 22:33:42 +0000550// the table-generated machinery.
Bill Schmidt646cd792013-07-30 00:50:39 +0000551bool PPCFastISel::TargetSelectInstruction(const Instruction *I) {
Bill Schmidt3fad2bc2013-08-25 22:33:42 +0000552
553 switch (I->getOpcode()) {
554 case Instruction::Br:
555 return SelectBranch(I);
556 case Instruction::IndirectBr:
557 return SelectIndirectBr(I);
Bill Schmidt055d2072013-08-26 19:42:51 +0000558 case Instruction::Ret:
559 return SelectRet(I);
560 case Instruction::ZExt:
561 case Instruction::SExt:
562 return SelectIntExt(I);
Bill Schmidt3fad2bc2013-08-25 22:33:42 +0000563 // Here add other flavors of Instruction::XXX that automated
564 // cases don't catch. For example, switches are terminators
565 // that aren't yet handled.
566 default:
567 break;
568 }
569 return false;
Bill Schmidt646cd792013-07-30 00:50:39 +0000570}
571
572// Materialize a floating-point constant into a register, and return
573// the register number (or zero if we failed to handle it).
574unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) {
575 // No plans to handle long double here.
576 if (VT != MVT::f32 && VT != MVT::f64)
577 return 0;
578
579 // All FP constants are loaded from the constant pool.
580 unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
581 assert(Align > 0 && "Unexpectedly missing alignment information!");
582 unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
583 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
584 CodeModel::Model CModel = TM.getCodeModel();
585
586 MachineMemOperand *MMO =
587 FuncInfo.MF->getMachineMemOperand(
588 MachinePointerInfo::getConstantPool(), MachineMemOperand::MOLoad,
589 (VT == MVT::f32) ? 4 : 8, Align);
590
Bill Schmidt3fad2bc2013-08-25 22:33:42 +0000591 unsigned Opc = (VT == MVT::f32) ? PPC::LFS : PPC::LFD;
592 unsigned TmpReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
593
594 // For small code model, generate a LF[SD](0, LDtocCPT(Idx, X2)).
595 if (CModel == CodeModel::Small || CModel == CodeModel::JITDefault) {
Bill Schmidt646cd792013-07-30 00:50:39 +0000596 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtocCPT),
Bill Schmidt3fad2bc2013-08-25 22:33:42 +0000597 TmpReg)
598 .addConstantPoolIndex(Idx).addReg(PPC::X2);
599 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
600 .addImm(0).addReg(TmpReg).addMemOperand(MMO);
601 } else {
Bill Schmidt646cd792013-07-30 00:50:39 +0000602 // Otherwise we generate LF[SD](Idx[lo], ADDIStocHA(X2, Idx)).
Bill Schmidt646cd792013-07-30 00:50:39 +0000603 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDIStocHA),
604 TmpReg).addReg(PPC::X2).addConstantPoolIndex(Idx);
605 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
606 .addConstantPoolIndex(Idx, 0, PPCII::MO_TOC_LO)
607 .addReg(TmpReg)
608 .addMemOperand(MMO);
609 }
610
611 return DestReg;
612}
613
614// Materialize a 32-bit integer constant into a register, and return
615// the register number (or zero if we failed to handle it).
616unsigned PPCFastISel::PPCMaterialize32BitInt(int64_t Imm,
617 const TargetRegisterClass *RC) {
618 unsigned Lo = Imm & 0xFFFF;
619 unsigned Hi = (Imm >> 16) & 0xFFFF;
620
621 unsigned ResultReg = createResultReg(RC);
622 bool IsGPRC = RC->hasSuperClassEq(&PPC::GPRCRegClass);
623
624 if (isInt<16>(Imm))
625 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
626 TII.get(IsGPRC ? PPC::LI : PPC::LI8), ResultReg)
627 .addImm(Imm);
628 else if (Lo) {
629 // Both Lo and Hi have nonzero bits.
630 unsigned TmpReg = createResultReg(RC);
631 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
632 TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), TmpReg)
633 .addImm(Hi);
634 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
635 TII.get(IsGPRC ? PPC::ORI : PPC::ORI8), ResultReg)
636 .addReg(TmpReg).addImm(Lo);
637 } else
638 // Just Hi bits.
639 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
640 TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), ResultReg)
641 .addImm(Hi);
642
643 return ResultReg;
644}
645
646// Materialize a 64-bit integer constant into a register, and return
647// the register number (or zero if we failed to handle it).
648unsigned PPCFastISel::PPCMaterialize64BitInt(int64_t Imm,
649 const TargetRegisterClass *RC) {
650 unsigned Remainder = 0;
651 unsigned Shift = 0;
652
653 // If the value doesn't fit in 32 bits, see if we can shift it
654 // so that it fits in 32 bits.
655 if (!isInt<32>(Imm)) {
656 Shift = countTrailingZeros<uint64_t>(Imm);
657 int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift;
658
659 if (isInt<32>(ImmSh))
660 Imm = ImmSh;
661 else {
662 Remainder = Imm;
663 Shift = 32;
664 Imm >>= 32;
665 }
666 }
667
668 // Handle the high-order 32 bits (if shifted) or the whole 32 bits
669 // (if not shifted).
670 unsigned TmpReg1 = PPCMaterialize32BitInt(Imm, RC);
671 if (!Shift)
672 return TmpReg1;
673
674 // If upper 32 bits were not zero, we've built them and need to shift
675 // them into place.
676 unsigned TmpReg2;
677 if (Imm) {
678 TmpReg2 = createResultReg(RC);
679 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::RLDICR),
680 TmpReg2).addReg(TmpReg1).addImm(Shift).addImm(63 - Shift);
681 } else
682 TmpReg2 = TmpReg1;
683
684 unsigned TmpReg3, Hi, Lo;
685 if ((Hi = (Remainder >> 16) & 0xFFFF)) {
686 TmpReg3 = createResultReg(RC);
687 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ORIS8),
688 TmpReg3).addReg(TmpReg2).addImm(Hi);
689 } else
690 TmpReg3 = TmpReg2;
691
692 if ((Lo = Remainder & 0xFFFF)) {
693 unsigned ResultReg = createResultReg(RC);
694 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ORI8),
695 ResultReg).addReg(TmpReg3).addImm(Lo);
696 return ResultReg;
697 }
698
699 return TmpReg3;
700}
701
702
703// Materialize an integer constant into a register, and return
704// the register number (or zero if we failed to handle it).
705unsigned PPCFastISel::PPCMaterializeInt(const Constant *C, MVT VT) {
706
707 if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16 &&
708 VT != MVT::i8 && VT != MVT::i1)
709 return 0;
710
711 const TargetRegisterClass *RC = ((VT == MVT::i64) ? &PPC::G8RCRegClass :
712 &PPC::GPRCRegClass);
713
714 // If the constant is in range, use a load-immediate.
715 const ConstantInt *CI = cast<ConstantInt>(C);
716 if (isInt<16>(CI->getSExtValue())) {
717 unsigned Opc = (VT == MVT::i64) ? PPC::LI8 : PPC::LI;
718 unsigned ImmReg = createResultReg(RC);
719 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ImmReg)
720 .addImm(CI->getSExtValue());
721 return ImmReg;
722 }
723
724 // Construct the constant piecewise.
725 int64_t Imm = CI->getZExtValue();
726
727 if (VT == MVT::i64)
728 return PPCMaterialize64BitInt(Imm, RC);
729 else if (VT == MVT::i32)
730 return PPCMaterialize32BitInt(Imm, RC);
731
732 return 0;
733}
734
735// Materialize a constant into a register, and return the register
736// number (or zero if we failed to handle it).
737unsigned PPCFastISel::TargetMaterializeConstant(const Constant *C) {
738 EVT CEVT = TLI.getValueType(C->getType(), true);
739
740 // Only handle simple types.
741 if (!CEVT.isSimple()) return 0;
742 MVT VT = CEVT.getSimpleVT();
743
744 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
745 return PPCMaterializeFP(CFP, VT);
746 else if (isa<ConstantInt>(C))
747 return PPCMaterializeInt(C, VT);
748 // TBD: Global values.
749
750 return 0;
751}
752
753// Materialize the address created by an alloca into a register, and
754// return the register number (or zero if we failed to handle it). TBD.
755unsigned PPCFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
756 return AI && 0;
757}
758
759// Fold loads into extends when possible. TBD.
760bool PPCFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
761 const LoadInst *LI) {
762 return MI && OpNo && LI && false;
763}
764
765// Attempt to lower call arguments in a faster way than done by
766// the selection DAG code.
767bool PPCFastISel::FastLowerArguments() {
768 // Defer to normal argument lowering for now. It's reasonably
769 // efficient. Consider doing something like ARM to handle the
770 // case where all args fit in registers, no varargs, no float
771 // or vector args.
772 return false;
773}
774
Bill Schmidt3fad2bc2013-08-25 22:33:42 +0000775// Handle materializing integer constants into a register. This is not
776// automatically generated for PowerPC, so must be explicitly created here.
777unsigned PPCFastISel::FastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) {
778
779 if (Opc != ISD::Constant)
780 return 0;
781
782 if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16 &&
783 VT != MVT::i8 && VT != MVT::i1)
784 return 0;
785
786 const TargetRegisterClass *RC = ((VT == MVT::i64) ? &PPC::G8RCRegClass :
787 &PPC::GPRCRegClass);
788 if (VT == MVT::i64)
789 return PPCMaterialize64BitInt(Imm, RC);
790 else
791 return PPCMaterialize32BitInt(Imm, RC);
792}
793
Bill Schmidt646cd792013-07-30 00:50:39 +0000794namespace llvm {
795 // Create the fast instruction selector for PowerPC64 ELF.
796 FastISel *PPC::createFastISel(FunctionLoweringInfo &FuncInfo,
797 const TargetLibraryInfo *LibInfo) {
798 const TargetMachine &TM = FuncInfo.MF->getTarget();
799
800 // Only available on 64-bit ELF for now.
801 const PPCSubtarget *Subtarget = &TM.getSubtarget<PPCSubtarget>();
802 if (Subtarget->isPPC64() && Subtarget->isSVR4ABI())
803 return new PPCFastISel(FuncInfo, LibInfo);
804
805 return 0;
806 }
807}