blob: 112e5663e759c5d19b53c26e2abfc3ce54499cc8 [file] [log] [blame]
Reed Kotler720c5ca2014-04-17 22:15:34 +00001//===-- MipsastISel.cpp - Mips FastISel implementation
2//---------------------===//
3
Chandler Carruthd9903882015-01-14 11:23:27 +00004#include "MipsCCState.h"
5#include "MipsISelLowering.h"
6#include "MipsMachineFunction.h"
7#include "MipsRegisterInfo.h"
8#include "MipsSubtarget.h"
9#include "MipsTargetMachine.h"
Chandler Carruth62d42152015-01-15 02:16:27 +000010#include "llvm/Analysis/TargetLibraryInfo.h"
Reed Kotler720c5ca2014-04-17 22:15:34 +000011#include "llvm/CodeGen/FastISel.h"
Reed Kotleraa150ed2015-02-12 21:05:12 +000012#include "llvm/CodeGen/FunctionLoweringInfo.h"
Reed Kotler67077b32014-04-29 17:57:50 +000013#include "llvm/CodeGen/MachineInstrBuilder.h"
Reed Kotleraa150ed2015-02-12 21:05:12 +000014#include "llvm/CodeGen/MachineRegisterInfo.h"
Reed Kotlerbab3f232014-05-01 20:39:21 +000015#include "llvm/IR/GlobalAlias.h"
16#include "llvm/IR/GlobalVariable.h"
Reed Kotler67077b32014-04-29 17:57:50 +000017#include "llvm/Target/TargetInstrInfo.h"
Reed Kotler720c5ca2014-04-17 22:15:34 +000018
19using namespace llvm;
20
21namespace {
22
23class MipsFastISel final : public FastISel {
24
Reed Kotlera562b462014-10-13 21:46:41 +000025 // All possible address modes.
26 class Address {
27 public:
28 typedef enum { RegBase, FrameIndexBase } BaseKind;
29
30 private:
31 BaseKind Kind;
32 union {
33 unsigned Reg;
34 int FI;
35 } Base;
36
37 int64_t Offset;
38
39 const GlobalValue *GV;
40
41 public:
42 // Innocuous defaults for our address.
43 Address() : Kind(RegBase), Offset(0), GV(0) { Base.Reg = 0; }
44 void setKind(BaseKind K) { Kind = K; }
45 BaseKind getKind() const { return Kind; }
46 bool isRegBase() const { return Kind == RegBase; }
47 void setReg(unsigned Reg) {
48 assert(isRegBase() && "Invalid base register access!");
49 Base.Reg = Reg;
50 }
51 unsigned getReg() const {
52 assert(isRegBase() && "Invalid base register access!");
53 return Base.Reg;
54 }
55 void setOffset(int64_t Offset_) { Offset = Offset_; }
56 int64_t getOffset() const { return Offset; }
57 void setGlobalValue(const GlobalValue *G) { GV = G; }
58 const GlobalValue *getGlobalValue() { return GV; }
59 };
60
Reed Kotler67077b32014-04-29 17:57:50 +000061 /// Subtarget - Keep a pointer to the MipsSubtarget around so that we can
62 /// make the right decision when generating code for different targets.
Reed Kotler67077b32014-04-29 17:57:50 +000063 const TargetMachine &TM;
Eric Christopher96e72c62015-01-29 23:27:36 +000064 const MipsSubtarget *Subtarget;
Reed Kotler67077b32014-04-29 17:57:50 +000065 const TargetInstrInfo &TII;
66 const TargetLowering &TLI;
67 MipsFunctionInfo *MFI;
68
69 // Convenience variables to avoid some queries.
70 LLVMContext *Context;
71
Reed Kotlerd5c41962014-11-13 23:37:45 +000072 bool fastLowerCall(CallLoweringInfo &CLI) override;
73
Reed Kotler67077b32014-04-29 17:57:50 +000074 bool TargetSupported;
Reed Kotlera562b462014-10-13 21:46:41 +000075 bool UnsupportedFPMode; // To allow fast-isel to proceed and just not handle
76 // floating point but not reject doing fast-isel in other
77 // situations
78
79private:
80 // Selection routines.
81 bool selectLoad(const Instruction *I);
82 bool selectStore(const Instruction *I);
83 bool selectBranch(const Instruction *I);
84 bool selectCmp(const Instruction *I);
85 bool selectFPExt(const Instruction *I);
86 bool selectFPTrunc(const Instruction *I);
87 bool selectFPToInt(const Instruction *I, bool IsSigned);
88 bool selectRet(const Instruction *I);
89 bool selectTrunc(const Instruction *I);
90 bool selectIntExt(const Instruction *I);
91
92 // Utility helper routines.
Reed Kotlera562b462014-10-13 21:46:41 +000093 bool isTypeLegal(Type *Ty, MVT &VT);
94 bool isLoadTypeLegal(Type *Ty, MVT &VT);
95 bool computeAddress(const Value *Obj, Address &Addr);
Reed Kotlerd5c41962014-11-13 23:37:45 +000096 bool computeCallAddress(const Value *V, Address &Addr);
Reed Kotlera562b462014-10-13 21:46:41 +000097
98 // Emit helper routines.
99 bool emitCmp(unsigned DestReg, const CmpInst *CI);
100 bool emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
101 unsigned Alignment = 0);
Reed Kotlerd5c41962014-11-13 23:37:45 +0000102 bool emitStore(MVT VT, unsigned SrcReg, Address Addr,
103 MachineMemOperand *MMO = nullptr);
Reed Kotlera562b462014-10-13 21:46:41 +0000104 bool emitStore(MVT VT, unsigned SrcReg, Address &Addr,
105 unsigned Alignment = 0);
Reed Kotlerd5c41962014-11-13 23:37:45 +0000106 unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
Reed Kotlera562b462014-10-13 21:46:41 +0000107 bool emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg,
108
109 bool IsZExt);
110 bool emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
111
112 bool emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
113 bool emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
114 unsigned DestReg);
115 bool emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
116 unsigned DestReg);
117
118 unsigned getRegEnsuringSimpleIntegerWidening(const Value *, bool IsUnsigned);
119
120 unsigned materializeFP(const ConstantFP *CFP, MVT VT);
121 unsigned materializeGV(const GlobalValue *GV, MVT VT);
122 unsigned materializeInt(const Constant *C, MVT VT);
123 unsigned materialize32BitInt(int64_t Imm, const TargetRegisterClass *RC);
124
125 MachineInstrBuilder emitInst(unsigned Opc) {
126 return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
127 }
128 MachineInstrBuilder emitInst(unsigned Opc, unsigned DstReg) {
129 return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
130 DstReg);
131 }
132 MachineInstrBuilder emitInstStore(unsigned Opc, unsigned SrcReg,
133 unsigned MemReg, int64_t MemOffset) {
134 return emitInst(Opc).addReg(SrcReg).addReg(MemReg).addImm(MemOffset);
135 }
136 MachineInstrBuilder emitInstLoad(unsigned Opc, unsigned DstReg,
137 unsigned MemReg, int64_t MemOffset) {
138 return emitInst(Opc, DstReg).addReg(MemReg).addImm(MemOffset);
139 }
140 // for some reason, this default is not generated by tablegen
141 // so we explicitly generate it here.
142 //
143 unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
144 unsigned Op0, bool Op0IsKill, uint64_t imm1,
145 uint64_t imm2, unsigned Op3, bool Op3IsKill) {
146 return 0;
147 }
Reed Kotler67077b32014-04-29 17:57:50 +0000148
Reed Kotlerd5c41962014-11-13 23:37:45 +0000149 // Call handling routines.
150private:
151 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
152 bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
153 unsigned &NumBytes);
154 bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);
155
Reed Kotler720c5ca2014-04-17 22:15:34 +0000156public:
Reed Kotlera562b462014-10-13 21:46:41 +0000157 // Backend specific FastISel code.
Reed Kotler720c5ca2014-04-17 22:15:34 +0000158 explicit MipsFastISel(FunctionLoweringInfo &funcInfo,
159 const TargetLibraryInfo *libInfo)
Eric Christopher3ab98892014-12-20 00:07:09 +0000160 : FastISel(funcInfo, libInfo), TM(funcInfo.MF->getTarget()),
Eric Christopher96e72c62015-01-29 23:27:36 +0000161 Subtarget(
162 &static_cast<const MipsSubtarget &>(funcInfo.MF->getSubtarget())),
163 TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()) {
Reed Kotler67077b32014-04-29 17:57:50 +0000164 MFI = funcInfo.MF->getInfo<MipsFunctionInfo>();
165 Context = &funcInfo.Fn->getContext();
Eric Christopherd86af632015-01-29 23:27:45 +0000166 TargetSupported =
167 ((TM.getRelocationModel() == Reloc::PIC_) &&
168 ((Subtarget->hasMips32r2() || Subtarget->hasMips32()) &&
169 (static_cast<const MipsTargetMachine &>(TM).getABI().IsO32())));
Reed Kotler12f94882014-10-10 17:00:46 +0000170 UnsupportedFPMode = Subtarget->isFP64bit();
Reed Kotler67077b32014-04-29 17:57:50 +0000171 }
172
Juergen Ributzka5b8bb4d2014-09-03 20:56:52 +0000173 unsigned fastMaterializeConstant(const Constant *C) override;
Reed Kotlera562b462014-10-13 21:46:41 +0000174 bool fastSelectInstruction(const Instruction *I) override;
Reed Kotler9fe3bfd2014-06-16 22:05:47 +0000175
Reed Kotler9fe25f32014-06-08 02:08:43 +0000176#include "MipsGenFastISel.inc"
Reed Kotler720c5ca2014-04-17 22:15:34 +0000177};
Reed Kotlera562b462014-10-13 21:46:41 +0000178} // end anonymous namespace.
Reed Kotler67077b32014-04-29 17:57:50 +0000179
// Forward declaration; presumably satisfied by the generated
// MipsGenCallingConv.inc included below — TODO confirm. Marked unused because
// fast-isel dispatches through CC_MipsO32 directly (see CCAssignFnForCall).
static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT,
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                    CCState &State) LLVM_ATTRIBUTE_UNUSED;
Reed Kotlerd5c41962014-11-13 23:37:45 +0000183
// Stub referenced by the calling-convention tables (MipsGenCallingConv.inc,
// included below). Fast-isel never routes argument assignment through it, so
// reaching this function is a bug.
static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  llvm_unreachable("should not be called");
}
189
// FP64 counterpart of the stub above; same story — must never actually be
// invoked by this fast-isel implementation.
static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  llvm_unreachable("should not be called");
}
195
196#include "MipsGenCallingConv.inc"
197
// Return the CC analysis function used when lowering calls. Fast-isel here is
// restricted to the O32 ABI (see TargetSupported in the constructor), so the
// calling convention argument is ignored and CC_MipsO32 is always used.
CCAssignFn *MipsFastISel::CCAssignFnForCall(CallingConv::ID CC) const {
  return CC_MipsO32;
}
201
Reed Kotlerd4ea29e2014-10-14 18:27:58 +0000202unsigned MipsFastISel::materializeInt(const Constant *C, MVT VT) {
203 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
Reed Kotler497311a2014-10-10 17:39:51 +0000204 return 0;
Reed Kotlerd4ea29e2014-10-14 18:27:58 +0000205 const TargetRegisterClass *RC = &Mips::GPR32RegClass;
206 const ConstantInt *CI = cast<ConstantInt>(C);
207 int64_t Imm;
208 if ((VT != MVT::i1) && CI->isNegative())
209 Imm = CI->getSExtValue();
210 else
211 Imm = CI->getZExtValue();
212 return materialize32BitInt(Imm, RC);
Reed Kotler497311a2014-10-10 17:39:51 +0000213}
214
Reed Kotlerd4ea29e2014-10-14 18:27:58 +0000215unsigned MipsFastISel::materialize32BitInt(int64_t Imm,
216 const TargetRegisterClass *RC) {
217 unsigned ResultReg = createResultReg(RC);
218
219 if (isInt<16>(Imm)) {
220 unsigned Opc = Mips::ADDiu;
221 emitInst(Opc, ResultReg).addReg(Mips::ZERO).addImm(Imm);
222 return ResultReg;
223 } else if (isUInt<16>(Imm)) {
224 emitInst(Mips::ORi, ResultReg).addReg(Mips::ZERO).addImm(Imm);
225 return ResultReg;
Reed Kotler9fe3bfd2014-06-16 22:05:47 +0000226 }
Reed Kotlerd4ea29e2014-10-14 18:27:58 +0000227 unsigned Lo = Imm & 0xFFFF;
228 unsigned Hi = (Imm >> 16) & 0xFFFF;
229 if (Lo) {
230 // Both Lo and Hi have nonzero bits.
231 unsigned TmpReg = createResultReg(RC);
232 emitInst(Mips::LUi, TmpReg).addImm(Hi);
233 emitInst(Mips::ORi, ResultReg).addReg(TmpReg).addImm(Lo);
234 } else {
235 emitInst(Mips::LUi, ResultReg).addImm(Hi);
Reed Kotler9fe3bfd2014-06-16 22:05:47 +0000236 }
Reed Kotlerd4ea29e2014-10-14 18:27:58 +0000237 return ResultReg;
238}
239
240unsigned MipsFastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
241 if (UnsupportedFPMode)
242 return 0;
243 int64_t Imm = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
244 if (VT == MVT::f32) {
245 const TargetRegisterClass *RC = &Mips::FGR32RegClass;
246 unsigned DestReg = createResultReg(RC);
247 unsigned TempReg = materialize32BitInt(Imm, &Mips::GPR32RegClass);
248 emitInst(Mips::MTC1, DestReg).addReg(TempReg);
249 return DestReg;
250 } else if (VT == MVT::f64) {
251 const TargetRegisterClass *RC = &Mips::AFGR64RegClass;
252 unsigned DestReg = createResultReg(RC);
253 unsigned TempReg1 = materialize32BitInt(Imm >> 32, &Mips::GPR32RegClass);
254 unsigned TempReg2 =
255 materialize32BitInt(Imm & 0xFFFFFFFF, &Mips::GPR32RegClass);
256 emitInst(Mips::BuildPairF64, DestReg).addReg(TempReg2).addReg(TempReg1);
257 return DestReg;
Reed Kotler9fe3bfd2014-06-16 22:05:47 +0000258 }
Reed Kotlerd4ea29e2014-10-14 18:27:58 +0000259 return 0;
260}
261
// Materialize the address of a global value. The address is loaded out of the
// GOT via the function's global base register; for locally-visible data
// symbols an extra ADDiu with the %lo part of the address is appended.
// Returns 0 (failure) for non-i32 pointers and thread-local globals.
unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32)
    return 0;
  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  unsigned DestReg = createResultReg(RC);
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
  // TLS not supported at this time.
  if (IsThreadLocal)
    return 0;
  // lw dest, %got(GV)($global_base)
  emitInst(Mips::LW, DestReg)
      .addReg(MFI->getGlobalBaseReg())
      .addGlobalAddress(GV, 0, MipsII::MO_GOT);
  // Internal-linkage globals and local non-function symbols need the low
  // 16 bits of the address added on top of the GOT entry.
  if ((GV->hasInternalLinkage() ||
       (GV->hasLocalLinkage() && !isa<Function>(GV)))) {
    unsigned TempReg = createResultReg(RC);
    emitInst(Mips::ADDiu, TempReg)
        .addReg(DestReg)
        .addGlobalAddress(GV, 0, MipsII::MO_ABS_LO);
    DestReg = TempReg;
  }
  return DestReg;
}
286
Reed Kotlerbab3f232014-05-01 20:39:21 +0000287// Materialize a constant into a register, and return the register
288// number (or zero if we failed to handle it).
Juergen Ributzka5b8bb4d2014-09-03 20:56:52 +0000289unsigned MipsFastISel::fastMaterializeConstant(const Constant *C) {
Reed Kotlerbab3f232014-05-01 20:39:21 +0000290 EVT CEVT = TLI.getValueType(C->getType(), true);
291
292 // Only handle simple types.
293 if (!CEVT.isSimple())
294 return 0;
295 MVT VT = CEVT.getSimpleVT();
296
297 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
Reed Kotlera562b462014-10-13 21:46:41 +0000298 return (UnsupportedFPMode) ? 0 : materializeFP(CFP, VT);
Reed Kotlerbab3f232014-05-01 20:39:21 +0000299 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
Reed Kotlera562b462014-10-13 21:46:41 +0000300 return materializeGV(GV, VT);
Reed Kotlerbab3f232014-05-01 20:39:21 +0000301 else if (isa<ConstantInt>(C))
Reed Kotlera562b462014-10-13 21:46:41 +0000302 return materializeInt(C, VT);
Reed Kotlerbab3f232014-05-01 20:39:21 +0000303
304 return 0;
305}
306
Reed Kotlerd4ea29e2014-10-14 18:27:58 +0000307bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
308 // This construct looks a big awkward but it is how other ports handle this
309 // and as this function is more fully completed, these cases which
310 // return false will have additional code in them.
Reed Kotlerbab3f232014-05-01 20:39:21 +0000311 //
Reed Kotlerd4ea29e2014-10-14 18:27:58 +0000312 if (isa<Instruction>(Obj))
Reed Kotlerbab3f232014-05-01 20:39:21 +0000313 return false;
Reed Kotlerd4ea29e2014-10-14 18:27:58 +0000314 else if (isa<ConstantExpr>(Obj))
Reed Kotler3ebdcc92014-09-30 16:30:13 +0000315 return false;
Reed Kotlerd4ea29e2014-10-14 18:27:58 +0000316 Addr.setReg(getRegForValue(Obj));
317 return Addr.getReg() != 0;
Reed Kotler3ebdcc92014-09-30 16:30:13 +0000318}
319
Reed Kotlerd5c41962014-11-13 23:37:45 +0000320bool MipsFastISel::computeCallAddress(const Value *V, Address &Addr) {
321 const GlobalValue *GV = dyn_cast<GlobalValue>(V);
322 if (GV && isa<Function>(GV) && dyn_cast<Function>(GV)->isIntrinsic())
323 return false;
324 if (!GV)
325 return false;
326 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
327 Addr.setGlobalValue(GV);
328 return true;
329 }
330 return false;
331}
332
Reed Kotlerd4ea29e2014-10-14 18:27:58 +0000333bool MipsFastISel::isTypeLegal(Type *Ty, MVT &VT) {
334 EVT evt = TLI.getValueType(Ty, true);
335 // Only handle simple types.
336 if (evt == MVT::Other || !evt.isSimple())
Reed Kotler3ebdcc92014-09-30 16:30:13 +0000337 return false;
Reed Kotlerd4ea29e2014-10-14 18:27:58 +0000338 VT = evt.getSimpleVT();
339
340 // Handle all legal types, i.e. a register that will directly hold this
341 // value.
342 return TLI.isTypeLegal(VT);
Reed Kotler3ebdcc92014-09-30 16:30:13 +0000343}
344
Reed Kotlerd4ea29e2014-10-14 18:27:58 +0000345bool MipsFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
346 if (isTypeLegal(Ty, VT))
Reed Kotler62de6b92014-10-11 00:55:18 +0000347 return true;
Reed Kotlerd4ea29e2014-10-14 18:27:58 +0000348 // We will extend this in a later patch:
349 // If this is a type than can be sign or zero-extended to a basic operation
350 // go ahead and accept it now.
351 if (VT == MVT::i8 || VT == MVT::i16)
352 return true;
Reed Kotler62de6b92014-10-11 00:55:18 +0000353 return false;
354}
// Because of how EmitCmp is called with fast-isel, you can
// end up with redundant "andi" instructions after the sequences emitted below.
// We should try and solve this issue in the future.
//
// Materialize the result of the comparison CI as 0 or 1 in ResultReg.
// Integer predicates are built from SLT/SLTu (plus XOR/XORi to invert);
// ordered FP predicates use the c.cond.fmt + conditional-move idiom.
// Returns false for predicates (or operand types) that are not handled.
bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
  const Value *Left = CI->getOperand(0), *Right = CI->getOperand(1);
  bool IsUnsigned = CI->isUnsigned();
  unsigned LeftReg = getRegEnsuringSimpleIntegerWidening(Left, IsUnsigned);
  if (LeftReg == 0)
    return false;
  unsigned RightReg = getRegEnsuringSimpleIntegerWidening(Right, IsUnsigned);
  if (RightReg == 0)
    return false;
  CmpInst::Predicate P = CI->getPredicate();

  switch (P) {
  default:
    return false;
  case CmpInst::ICMP_EQ: {
    // xor temp, l, r ; sltiu result, temp, 1  -> 1 iff l == r.
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::SLTiu, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::ICMP_NE: {
    // xor temp, l, r ; sltu result, $zero, temp  -> 1 iff l != r.
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::SLTu, ResultReg).addReg(Mips::ZERO).addReg(TempReg);
    break;
  }
  case CmpInst::ICMP_UGT: {
    // l >u r  ==  r <u l.
    emitInst(Mips::SLTu, ResultReg).addReg(RightReg).addReg(LeftReg);
    break;
  }
  case CmpInst::ICMP_ULT: {
    emitInst(Mips::SLTu, ResultReg).addReg(LeftReg).addReg(RightReg);
    break;
  }
  case CmpInst::ICMP_UGE: {
    // l >=u r  ==  !(l <u r); invert with xori 1.
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLTu, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::ICMP_ULE: {
    // l <=u r  ==  !(r <u l).
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLTu, TempReg).addReg(RightReg).addReg(LeftReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::ICMP_SGT: {
    // Signed variants mirror the unsigned ones using SLT.
    emitInst(Mips::SLT, ResultReg).addReg(RightReg).addReg(LeftReg);
    break;
  }
  case CmpInst::ICMP_SLT: {
    emitInst(Mips::SLT, ResultReg).addReg(LeftReg).addReg(RightReg);
    break;
  }
  case CmpInst::ICMP_SGE: {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLT, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::ICMP_SLE: {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLT, TempReg).addReg(RightReg).addReg(LeftReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::FCMP_OEQ:
  case CmpInst::FCMP_UNE:
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_OGE: {
    if (UnsupportedFPMode)
      return false;
    bool IsFloat = Left->getType()->isFloatTy();
    bool IsDouble = Left->getType()->isDoubleTy();
    if (!IsFloat && !IsDouble)
      return false;
    // Pick the FP compare that sets FCC0, and whether the conditional move
    // fires on FCC0 true (MOVT) or false (MOVF). OGT/OGE are expressed by
    // testing the complementary unordered condition (ULE/ULT) and selecting
    // on FCC0 == false.
    unsigned Opc, CondMovOpc;
    switch (P) {
    case CmpInst::FCMP_OEQ:
      Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
      CondMovOpc = Mips::MOVT_I;
      break;
    case CmpInst::FCMP_UNE:
      Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
      CondMovOpc = Mips::MOVF_I;
      break;
    case CmpInst::FCMP_OLT:
      Opc = IsFloat ? Mips::C_OLT_S : Mips::C_OLT_D32;
      CondMovOpc = Mips::MOVT_I;
      break;
    case CmpInst::FCMP_OLE:
      Opc = IsFloat ? Mips::C_OLE_S : Mips::C_OLE_D32;
      CondMovOpc = Mips::MOVT_I;
      break;
    case CmpInst::FCMP_OGT:
      Opc = IsFloat ? Mips::C_ULE_S : Mips::C_ULE_D32;
      CondMovOpc = Mips::MOVF_I;
      break;
    case CmpInst::FCMP_OGE:
      Opc = IsFloat ? Mips::C_ULT_S : Mips::C_ULT_D32;
      CondMovOpc = Mips::MOVF_I;
      break;
    default:
      llvm_unreachable("Only switching of a subset of CCs.");
    }
    // Seed result = 0, then conditionally move 1 over it based on FCC0.
    unsigned RegWithZero = createResultReg(&Mips::GPR32RegClass);
    unsigned RegWithOne = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::ADDiu, RegWithZero).addReg(Mips::ZERO).addImm(0);
    emitInst(Mips::ADDiu, RegWithOne).addReg(Mips::ZERO).addImm(1);
    emitInst(Opc).addReg(LeftReg).addReg(RightReg).addReg(
        Mips::FCC0, RegState::ImplicitDefine);
    MachineInstrBuilder MI = emitInst(CondMovOpc, ResultReg)
                                 .addReg(RegWithOne)
                                 .addReg(Mips::FCC0)
                                 .addReg(RegWithZero, RegState::Implicit);
    // Tie the destination to the implicit zero source so the register
    // allocator keeps them in the same register (cond-move semantics).
    MI->tieOperands(0, 3);
    break;
  }
  }
  return true;
}
Reed Kotlerd4ea29e2014-10-14 18:27:58 +0000482bool MipsFastISel::emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
483 unsigned Alignment) {
484 //
485 // more cases will be handled here in following patches.
486 //
487 unsigned Opc;
488 switch (VT.SimpleTy) {
489 case MVT::i32: {
490 ResultReg = createResultReg(&Mips::GPR32RegClass);
491 Opc = Mips::LW;
492 break;
493 }
494 case MVT::i16: {
495 ResultReg = createResultReg(&Mips::GPR32RegClass);
496 Opc = Mips::LHu;
497 break;
498 }
499 case MVT::i8: {
500 ResultReg = createResultReg(&Mips::GPR32RegClass);
501 Opc = Mips::LBu;
502 break;
503 }
504 case MVT::f32: {
505 if (UnsupportedFPMode)
506 return false;
507 ResultReg = createResultReg(&Mips::FGR32RegClass);
508 Opc = Mips::LWC1;
509 break;
510 }
511 case MVT::f64: {
512 if (UnsupportedFPMode)
513 return false;
514 ResultReg = createResultReg(&Mips::AFGR64RegClass);
515 Opc = Mips::LDC1;
516 break;
517 }
518 default:
519 return false;
520 }
521 emitInstLoad(Opc, ResultReg, Addr.getReg(), Addr.getOffset());
522 return true;
523}
524
525bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr,
526 unsigned Alignment) {
527 //
528 // more cases will be handled here in following patches.
529 //
530 unsigned Opc;
531 switch (VT.SimpleTy) {
532 case MVT::i8:
533 Opc = Mips::SB;
534 break;
535 case MVT::i16:
536 Opc = Mips::SH;
537 break;
538 case MVT::i32:
539 Opc = Mips::SW;
540 break;
541 case MVT::f32:
542 if (UnsupportedFPMode)
543 return false;
544 Opc = Mips::SWC1;
545 break;
546 case MVT::f64:
547 if (UnsupportedFPMode)
548 return false;
549 Opc = Mips::SDC1;
550 break;
551 default:
552 return false;
553 }
554 emitInstStore(Opc, SrcReg, Addr.getReg(), Addr.getOffset());
555 return true;
556}
557
558bool MipsFastISel::selectLoad(const Instruction *I) {
559 // Atomic loads need special handling.
560 if (cast<LoadInst>(I)->isAtomic())
561 return false;
562
563 // Verify we have a legal type before going any further.
564 MVT VT;
565 if (!isLoadTypeLegal(I->getType(), VT))
566 return false;
567
568 // See if we can handle this address.
569 Address Addr;
570 if (!computeAddress(I->getOperand(0), Addr))
571 return false;
572
573 unsigned ResultReg;
574 if (!emitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
575 return false;
576 updateValueMap(I, ResultReg);
577 return true;
578}
579
580bool MipsFastISel::selectStore(const Instruction *I) {
581 Value *Op0 = I->getOperand(0);
582 unsigned SrcReg = 0;
583
584 // Atomic stores need special handling.
585 if (cast<StoreInst>(I)->isAtomic())
586 return false;
587
588 // Verify we have a legal type before going any further.
589 MVT VT;
590 if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
591 return false;
592
593 // Get the value to be stored into a register.
594 SrcReg = getRegForValue(Op0);
595 if (SrcReg == 0)
596 return false;
597
598 // See if we can handle this address.
599 Address Addr;
600 if (!computeAddress(I->getOperand(1), Addr))
601 return false;
602
603 if (!emitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
604 return false;
605 return true;
606}
607
//
// This can cause a redundant sltiu to be generated.
// FIXME: try and eliminate this in a future patch.
//
// Fast-select a conditional branch whose condition is fed directly by a
// compare instruction; everything else is rejected.
bool MipsFastISel::selectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *BrBB = FuncInfo.MBB;
  //
  // TBB is the basic block for the case where the comparison is true.
  // FBB is the basic block for the case where the comparison is false.
  // if (cond) goto TBB
  // goto FBB
  // TBB:
  //
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
  // NOTE(review): this call has no effect; the condition is re-fetched in
  // the dyn_cast below.
  BI->getCondition();
  // For now, just try the simplest case where it's fed by a compare.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    unsigned CondReg = createResultReg(&Mips::GPR32RegClass);
    if (!emitCmp(CondReg, CI))
      return false;
    // emitCmp leaves 0 or 1 in CondReg, so branch to TBB when it is > 0.
    BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::BGTZ))
        .addReg(CondReg)
        .addMBB(TBB);
    // Fall-through edge to FBB; presumably fastEmitBranch records the FBB
    // successor itself, as only TBB is added below — TODO confirm.
    fastEmitBranch(FBB, DbgLoc);
    FuncInfo.MBB->addSuccessor(TBB);
    return true;
  }
  return false;
}
Reed Kotler62de6b92014-10-11 00:55:18 +0000639
Reed Kotlera562b462014-10-13 21:46:41 +0000640bool MipsFastISel::selectCmp(const Instruction *I) {
Reed Kotler62de6b92014-10-11 00:55:18 +0000641 const CmpInst *CI = cast<CmpInst>(I);
642 unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
Reed Kotlera562b462014-10-13 21:46:41 +0000643 if (!emitCmp(ResultReg, CI))
Reed Kotler62de6b92014-10-11 00:55:18 +0000644 return false;
Reed Kotler497311a2014-10-10 17:39:51 +0000645 updateValueMap(I, ResultReg);
646 return true;
647}
648
Reed Kotlerd4ea29e2014-10-14 18:27:58 +0000649// Attempt to fast-select a floating-point extend instruction.
650bool MipsFastISel::selectFPExt(const Instruction *I) {
651 if (UnsupportedFPMode)
652 return false;
653 Value *Src = I->getOperand(0);
654 EVT SrcVT = TLI.getValueType(Src->getType(), true);
655 EVT DestVT = TLI.getValueType(I->getType(), true);
656
657 if (SrcVT != MVT::f32 || DestVT != MVT::f64)
658 return false;
659
660 unsigned SrcReg =
661 getRegForValue(Src); // his must be a 32 bit floating point register class
662 // maybe we should handle this differently
663 if (!SrcReg)
664 return false;
665
666 unsigned DestReg = createResultReg(&Mips::AFGR64RegClass);
667 emitInst(Mips::CVT_D32_S, DestReg).addReg(SrcReg);
668 updateValueMap(I, DestReg);
669 return true;
670}
671
672// Attempt to fast-select a floating-point truncate instruction.
673bool MipsFastISel::selectFPTrunc(const Instruction *I) {
674 if (UnsupportedFPMode)
675 return false;
676 Value *Src = I->getOperand(0);
677 EVT SrcVT = TLI.getValueType(Src->getType(), true);
678 EVT DestVT = TLI.getValueType(I->getType(), true);
679
680 if (SrcVT != MVT::f64 || DestVT != MVT::f32)
681 return false;
682
683 unsigned SrcReg = getRegForValue(Src);
684 if (!SrcReg)
685 return false;
686
687 unsigned DestReg = createResultReg(&Mips::FGR32RegClass);
688 if (!DestReg)
689 return false;
690
691 emitInst(Mips::CVT_S_D32, DestReg).addReg(SrcReg);
692 updateValueMap(I, DestReg);
693 return true;
694}
695
696// Attempt to fast-select a floating-point-to-integer conversion.
697bool MipsFastISel::selectFPToInt(const Instruction *I, bool IsSigned) {
698 if (UnsupportedFPMode)
699 return false;
700 MVT DstVT, SrcVT;
701 if (!IsSigned)
702 return false; // We don't handle this case yet. There is no native
703 // instruction for this but it can be synthesized.
704 Type *DstTy = I->getType();
705 if (!isTypeLegal(DstTy, DstVT))
706 return false;
707
708 if (DstVT != MVT::i32)
709 return false;
710
711 Value *Src = I->getOperand(0);
712 Type *SrcTy = Src->getType();
713 if (!isTypeLegal(SrcTy, SrcVT))
714 return false;
715
716 if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
717 return false;
718
719 unsigned SrcReg = getRegForValue(Src);
720 if (SrcReg == 0)
721 return false;
722
723 // Determine the opcode for the conversion, which takes place
724 // entirely within FPRs.
725 unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
726 unsigned TempReg = createResultReg(&Mips::FGR32RegClass);
727 unsigned Opc;
728
729 if (SrcVT == MVT::f32)
730 Opc = Mips::TRUNC_W_S;
731 else
732 Opc = Mips::TRUNC_W_D32;
733
734 // Generate the convert.
735 emitInst(Opc, TempReg).addReg(SrcReg);
736
737 emitInst(Mips::MFC1, DestReg).addReg(TempReg);
738
739 updateValueMap(I, DestReg);
740 return true;
741}
742//
// Assign locations to the outgoing arguments of a call and copy each argument
// into its ABI-mandated register. Stack (memory) arguments are not supported
// yet: hitting that path returns false so the whole call falls back to
// SelectionDAG. On success, NumBytes is the size of the outgoing argument
// area, never smaller than the 16-byte A0-A3 home area.
bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
                                   SmallVectorImpl<MVT> &OutVTs,
                                   unsigned &NumBytes) {
  CallingConv::ID CC = CLI.CallConv;
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();
  // This is the minimum argument area used for A0-A3.
  if (NumBytes < 16)
    NumBytes = 16;

  // NOTE(review): only the fixed 16-byte area is adjusted here, not NumBytes;
  // this is consistent for now because the stack-store path below always
  // returns false, so no call needing more than 16 bytes gets lowered here.
  emitInst(Mips::ADJCALLSTACKDOWN).addImm(16);
  // Process the args.
  MVT firstMVT;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    const Value *ArgVal = CLI.OutVals[VA.getValNo()];
    MVT ArgVT = OutVTs[VA.getValNo()];

    // Patch the generic CC assignment: the first one/two floating-point
    // arguments are passed in F12/F14 (f32) or D6/D7 (f64).
    if (i == 0) {
      firstMVT = ArgVT;
      if (ArgVT == MVT::f32) {
        VA.convertToReg(Mips::F12);
      } else if (ArgVT == MVT::f64) {
        VA.convertToReg(Mips::D6);
      }
    } else if (i == 1) {
      // The second FP register is only used when the first argument was
      // itself floating-point.
      if ((firstMVT == MVT::f32) || (firstMVT == MVT::f64)) {
        if (ArgVT == MVT::f32) {
          VA.convertToReg(Mips::F14);
        } else if (ArgVT == MVT::f64) {
          VA.convertToReg(Mips::D7);
        }
      }
    }
    // Redirect i32/f32 values assigned into the 16-byte home area to the
    // corresponding A0-A3 argument registers instead of memory.
    if (((ArgVT == MVT::i32) || (ArgVT == MVT::f32)) && VA.isMemLoc()) {
      switch (VA.getLocMemOffset()) {
      case 0:
        VA.convertToReg(Mips::A0);
        break;
      case 4:
        VA.convertToReg(Mips::A1);
        break;
      case 8:
        VA.convertToReg(Mips::A2);
        break;
      case 12:
        VA.convertToReg(Mips::A3);
        break;
      default:
        break;
      }
    }
    unsigned ArgReg = getRegForValue(ArgVal);
    if (!ArgReg)
      return false;

    // Handle arg promotion: SExt, ZExt, AExt.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::AExt:
    case CCValAssign::SExt: {
      MVT DestVT = VA.getLocVT();
      MVT SrcVT = ArgVT;
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
      if (!ArgReg)
        return false;
      break;
    }
    case CCValAssign::ZExt: {
      MVT DestVT = VA.getLocVT();
      MVT SrcVT = ArgVT;
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
      if (!ArgReg)
        return false;
      break;
    }
    default:
      llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
      CLI.OutRegs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      llvm_unreachable("Mips does not use custom args.");
      return false;
    } else {
      //
      // FIXME: This path will currently return false. It was copied
      // from the AArch64 port and should be essentially fine for Mips too.
      // The work to finish up this path will be done in a follow-on patch.
      //
      assert(VA.isMemLoc() && "Assuming store on stack.");
      // Don't emit stores for undef values.
      if (isa<UndefValue>(ArgVal))
        continue;

      // Need to store on the stack.
      // FIXME: This alignment is incorrect but this path is disabled
      // for now (will return false). We need to determine the right alignment
      // based on the normal alignment for the underlying machine type.
      //
      unsigned ArgSize = RoundUpToAlignment(ArgVT.getSizeInBits(), 4);

      // Big-endian targets place small values at the high end of the slot.
      unsigned BEAlign = 0;
      if (ArgSize < 8 && !Subtarget->isLittle())
        BEAlign = 8 - ArgSize;

      Address Addr;
      Addr.setKind(Address::RegBase);
      Addr.setReg(Mips::SP);
      Addr.setOffset(VA.getLocMemOffset() + BEAlign);

      unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType());
      MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
          MachinePointerInfo::getStack(Addr.getOffset()),
          MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
      (void)(MMO);
      // if (!emitStore(ArgVT, ArgReg, Addr, MMO))
      return false; // can't store on the stack yet.
    }
  }

  return true;
}
874
875bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
876 unsigned NumBytes) {
877 CallingConv::ID CC = CLI.CallConv;
878 emitInst(Mips::ADJCALLSTACKUP).addImm(16);
879 if (RetVT != MVT::isVoid) {
880 SmallVector<CCValAssign, 16> RVLocs;
881 CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
882 CCInfo.AnalyzeCallResult(RetVT, RetCC_Mips);
883
884 // Only handle a single return value.
885 if (RVLocs.size() != 1)
886 return false;
887 // Copy all of the result registers out of their specified physreg.
888 MVT CopyVT = RVLocs[0].getValVT();
889 // Special handling for extended integers.
890 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
891 CopyVT = MVT::i32;
892
893 unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
894 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
895 TII.get(TargetOpcode::COPY),
896 ResultReg).addReg(RVLocs[0].getLocReg());
897 CLI.InRegs.push_back(RVLocs[0].getLocReg());
898
899 CLI.ResultReg = ResultReg;
900 CLI.NumResultRegs = 1;
901 }
902 return true;
903}
904
905bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI) {
906 CallingConv::ID CC = CLI.CallConv;
907 bool IsTailCall = CLI.IsTailCall;
908 bool IsVarArg = CLI.IsVarArg;
909 const Value *Callee = CLI.Callee;
910 // const char *SymName = CLI.SymName;
911
912 // Allow SelectionDAG isel to handle tail calls.
913 if (IsTailCall)
914 return false;
915
916 // Let SDISel handle vararg functions.
917 if (IsVarArg)
918 return false;
919
920 // FIXME: Only handle *simple* calls for now.
921 MVT RetVT;
922 if (CLI.RetTy->isVoidTy())
923 RetVT = MVT::isVoid;
924 else if (!isTypeLegal(CLI.RetTy, RetVT))
925 return false;
926
927 for (auto Flag : CLI.OutFlags)
928 if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal())
929 return false;
930
931 // Set up the argument vectors.
932 SmallVector<MVT, 16> OutVTs;
933 OutVTs.reserve(CLI.OutVals.size());
934
935 for (auto *Val : CLI.OutVals) {
936 MVT VT;
937 if (!isTypeLegal(Val->getType(), VT) &&
938 !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))
939 return false;
940
941 // We don't handle vector parameters yet.
942 if (VT.isVector() || VT.getSizeInBits() > 64)
943 return false;
944
945 OutVTs.push_back(VT);
946 }
947
948 Address Addr;
949 if (!computeCallAddress(Callee, Addr))
950 return false;
951
952 // Handle the arguments now that we've gotten them.
953 unsigned NumBytes;
954 if (!processCallArgs(CLI, OutVTs, NumBytes))
955 return false;
956
957 // Issue the call.
958 unsigned DestAddress = materializeGV(Addr.getGlobalValue(), MVT::i32);
959 emitInst(TargetOpcode::COPY, Mips::T9).addReg(DestAddress);
960 MachineInstrBuilder MIB =
961 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::JALR),
962 Mips::RA).addReg(Mips::T9);
963
964 // Add implicit physical register uses to the call.
965 for (auto Reg : CLI.OutRegs)
966 MIB.addReg(Reg, RegState::Implicit);
967
968 // Add a register mask with the call-preserved registers.
969 // Proper defs for return values will be added by setPhysRegsDeadExcept().
970 MIB.addRegMask(TRI.getCallPreservedMask(CC));
971
972 CLI.Call = MIB;
973
974 // Add implicit physical register uses to the call.
975 for (auto Reg : CLI.OutRegs)
976 MIB.addReg(Reg, RegState::Implicit);
977
978 // Add a register mask with the call-preserved registers. Proper
979 // defs for return values will be added by setPhysRegsDeadExcept().
980 MIB.addRegMask(TRI.getCallPreservedMask(CC));
981
982 CLI.Call = MIB;
983 // Finish off the call including any return values.
984 return finishCall(CLI, RetVT, NumBytes);
985}
986
// Lower a 'ret' instruction. Handles void returns and a single value
// returned in a register (with i1/i8/i16 widened per the zeroext/signext
// attribute); anything else falls back to SelectionDAG.
bool MipsFastISel::selectRet(const Instruction *I) {
  const Function &F = *I->getParent()->getParent();
  const ReturnInst *Ret = cast<ReturnInst>(I);

  if (!FuncInfo.CanLowerReturn)
    return false;

  // Build a list of return value registers.
  SmallVector<unsigned, 4> RetRegs;

  if (Ret->getNumOperands() > 0) {
    CallingConv::ID CC = F.getCallingConv();
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    MipsCCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs,
                       I->getContext());
    CCAssignFn *RetCC = RetCC_Mips;
    CCInfo.AnalyzeReturn(Outs, RetCC);

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];
    const Value *RV = Ret->getOperand(0);

    // Don't bother handling odd stuff for now.
    if ((VA.getLocInfo() != CCValAssign::Full) &&
        (VA.getLocInfo() != CCValAssign::BCvt))
      return false;

    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // NOTE(review): with exactly one value location, getValNo() should be 0
    // here, so SrcReg == Reg; the addition mirrors other FastISel ports —
    // confirm if multi-register returns are ever enabled.
    unsigned SrcReg = Reg + VA.getValNo();
    unsigned DestReg = VA.getLocReg();
    // Avoid a cross-class copy. This is very unlikely.
    if (!MRI.getRegClass(SrcReg)->contains(DestReg))
      return false;

    EVT RVEVT = TLI.getValueType(RV->getType());
    if (!RVEVT.isSimple())
      return false;

    // Vector returns are not handled.
    if (RVEVT.isVector())
      return false;

    MVT RVVT = RVEVT.getSimpleVT();
    // f128 is not supported by this fast path.
    if (RVVT == MVT::f128)
      return false;

    MVT DestVT = VA.getValVT();
    // Special handling for extended integers: widen i1/i8/i16 according to
    // the return attribute before copying into the return register.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      // Require an explicit zeroext/signext attribute to know which
      // extension to perform.
      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
        return false;

      bool IsZExt = Outs[0].Flags.isZExt();
      SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);
      if (SrcReg == 0)
        return false;
    }

    // Make the copy.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);

    // Add register to return instruction.
    RetRegs.push_back(VA.getLocReg());
  }
  // Emit RetRA with the return-value registers as implicit uses so they
  // remain live up to the return.
  MachineInstrBuilder MIB = emitInst(Mips::RetRA);
  for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
    MIB.addReg(RetRegs[i], RegState::Implicit);
  return true;
}
1072
1073bool MipsFastISel::selectTrunc(const Instruction *I) {
1074 // The high bits for a type smaller than the register size are assumed to be
1075 // undefined.
1076 Value *Op = I->getOperand(0);
1077
1078 EVT SrcVT, DestVT;
1079 SrcVT = TLI.getValueType(Op->getType(), true);
1080 DestVT = TLI.getValueType(I->getType(), true);
1081
1082 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
1083 return false;
1084 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1085 return false;
1086
1087 unsigned SrcReg = getRegForValue(Op);
1088 if (!SrcReg)
1089 return false;
1090
1091 // Because the high bits are undefined, a truncate doesn't generate
1092 // any code.
1093 updateValueMap(I, SrcReg);
1094 return true;
1095}
1096bool MipsFastISel::selectIntExt(const Instruction *I) {
1097 Type *DestTy = I->getType();
1098 Value *Src = I->getOperand(0);
1099 Type *SrcTy = Src->getType();
1100
1101 bool isZExt = isa<ZExtInst>(I);
1102 unsigned SrcReg = getRegForValue(Src);
1103 if (!SrcReg)
1104 return false;
1105
1106 EVT SrcEVT, DestEVT;
1107 SrcEVT = TLI.getValueType(SrcTy, true);
1108 DestEVT = TLI.getValueType(DestTy, true);
1109 if (!SrcEVT.isSimple())
1110 return false;
1111 if (!DestEVT.isSimple())
1112 return false;
1113
1114 MVT SrcVT = SrcEVT.getSimpleVT();
1115 MVT DestVT = DestEVT.getSimpleVT();
1116 unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
1117
1118 if (!emitIntExt(SrcVT, SrcReg, DestVT, ResultReg, isZExt))
1119 return false;
1120 updateValueMap(I, ResultReg);
1121 return true;
1122}
1123bool MipsFastISel::emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1124 unsigned DestReg) {
1125 unsigned ShiftAmt;
1126 switch (SrcVT.SimpleTy) {
1127 default:
1128 return false;
1129 case MVT::i8:
1130 ShiftAmt = 24;
1131 break;
1132 case MVT::i16:
1133 ShiftAmt = 16;
1134 break;
1135 }
1136 unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
1137 emitInst(Mips::SLL, TempReg).addReg(SrcReg).addImm(ShiftAmt);
1138 emitInst(Mips::SRA, DestReg).addReg(TempReg).addImm(ShiftAmt);
1139 return true;
1140}
1141
1142bool MipsFastISel::emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1143 unsigned DestReg) {
1144 switch (SrcVT.SimpleTy) {
1145 default:
1146 return false;
1147 case MVT::i8:
1148 emitInst(Mips::SEB, DestReg).addReg(SrcReg);
1149 break;
1150 case MVT::i16:
1151 emitInst(Mips::SEH, DestReg).addReg(SrcReg);
1152 break;
1153 }
1154 return true;
1155}
1156
1157bool MipsFastISel::emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1158 unsigned DestReg) {
1159 if ((DestVT != MVT::i32) && (DestVT != MVT::i16))
1160 return false;
1161 if (Subtarget->hasMips32r2())
1162 return emitIntSExt32r2(SrcVT, SrcReg, DestVT, DestReg);
1163 return emitIntSExt32r1(SrcVT, SrcReg, DestVT, DestReg);
1164}
1165
1166bool MipsFastISel::emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1167 unsigned DestReg) {
1168 switch (SrcVT.SimpleTy) {
1169 default:
1170 return false;
1171 case MVT::i1:
1172 emitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(1);
1173 break;
1174 case MVT::i8:
1175 emitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(0xff);
1176 break;
1177 case MVT::i16:
1178 emitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(0xffff);
Reed Kotlerd5c41962014-11-13 23:37:45 +00001179 break;
Reed Kotlerd4ea29e2014-10-14 18:27:58 +00001180 }
1181 return true;
1182}
1183
1184bool MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1185 unsigned DestReg, bool IsZExt) {
1186 if (IsZExt)
1187 return emitIntZExt(SrcVT, SrcReg, DestVT, DestReg);
1188 return emitIntSExt(SrcVT, SrcReg, DestVT, DestReg);
1189}
Reed Kotlerd5c41962014-11-13 23:37:45 +00001190
1191unsigned MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1192 bool isZExt) {
1193 unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
Reed Kotleraa150ed2015-02-12 21:05:12 +00001194 bool Success = emitIntExt(SrcVT, SrcReg, DestVT, DestReg, isZExt);
1195 return Success ? DestReg : 0;
Reed Kotlerd5c41962014-11-13 23:37:45 +00001196}
1197
Juergen Ributzka5b8bb4d2014-09-03 20:56:52 +00001198bool MipsFastISel::fastSelectInstruction(const Instruction *I) {
Reed Kotler67077b32014-04-29 17:57:50 +00001199 if (!TargetSupported)
1200 return false;
1201 switch (I->getOpcode()) {
1202 default:
1203 break;
Reed Kotler9fe3bfd2014-06-16 22:05:47 +00001204 case Instruction::Load:
Reed Kotlera562b462014-10-13 21:46:41 +00001205 return selectLoad(I);
Reed Kotlerbab3f232014-05-01 20:39:21 +00001206 case Instruction::Store:
Reed Kotlera562b462014-10-13 21:46:41 +00001207 return selectStore(I);
Reed Kotler62de6b92014-10-11 00:55:18 +00001208 case Instruction::Br:
Reed Kotlera562b462014-10-13 21:46:41 +00001209 return selectBranch(I);
Reed Kotler67077b32014-04-29 17:57:50 +00001210 case Instruction::Ret:
Reed Kotlera562b462014-10-13 21:46:41 +00001211 return selectRet(I);
Reed Kotler3ebdcc92014-09-30 16:30:13 +00001212 case Instruction::Trunc:
Reed Kotlera562b462014-10-13 21:46:41 +00001213 return selectTrunc(I);
Reed Kotler3ebdcc92014-09-30 16:30:13 +00001214 case Instruction::ZExt:
1215 case Instruction::SExt:
Reed Kotlera562b462014-10-13 21:46:41 +00001216 return selectIntExt(I);
Reed Kotlerb9dc2482014-10-01 18:47:02 +00001217 case Instruction::FPTrunc:
Reed Kotlera562b462014-10-13 21:46:41 +00001218 return selectFPTrunc(I);
Reed Kotler3ebdcc92014-09-30 16:30:13 +00001219 case Instruction::FPExt:
Reed Kotlera562b462014-10-13 21:46:41 +00001220 return selectFPExt(I);
Reed Kotler12f94882014-10-10 17:00:46 +00001221 case Instruction::FPToSI:
Reed Kotlera562b462014-10-13 21:46:41 +00001222 return selectFPToInt(I, /*isSigned*/ true);
Reed Kotler12f94882014-10-10 17:00:46 +00001223 case Instruction::FPToUI:
Reed Kotlera562b462014-10-13 21:46:41 +00001224 return selectFPToInt(I, /*isSigned*/ false);
Reed Kotler497311a2014-10-10 17:39:51 +00001225 case Instruction::ICmp:
1226 case Instruction::FCmp:
Reed Kotlera562b462014-10-13 21:46:41 +00001227 return selectCmp(I);
Reed Kotler67077b32014-04-29 17:57:50 +00001228 }
1229 return false;
1230}
Reed Kotler720c5ca2014-04-17 22:15:34 +00001231
Reed Kotlerd4ea29e2014-10-14 18:27:58 +00001232unsigned MipsFastISel::getRegEnsuringSimpleIntegerWidening(const Value *V,
1233 bool IsUnsigned) {
1234 unsigned VReg = getRegForValue(V);
1235 if (VReg == 0)
Reed Kotler12f94882014-10-10 17:00:46 +00001236 return 0;
Reed Kotlerd4ea29e2014-10-14 18:27:58 +00001237 MVT VMVT = TLI.getValueType(V->getType(), true).getSimpleVT();
1238 if ((VMVT == MVT::i8) || (VMVT == MVT::i16)) {
1239 unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
1240 if (!emitIntExt(VMVT, VReg, MVT::i32, TempReg, IsUnsigned))
1241 return 0;
1242 VReg = TempReg;
Reed Kotler063d4fb2014-06-10 16:45:44 +00001243 }
Reed Kotlerd4ea29e2014-10-14 18:27:58 +00001244 return VReg;
Reed Kotlerbab3f232014-05-01 20:39:21 +00001245}
1246
namespace llvm {
// Factory entry point called by the Mips target to create its FastISel
// instance; ownership of the returned object passes to the caller.
FastISel *Mips::createFastISel(FunctionLoweringInfo &funcInfo,
                               const TargetLibraryInfo *libInfo) {
  return new MipsFastISel(funcInfo, libInfo);
}
}