//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

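// Command-line flag, defined elsewhere in the ARM backend, that forces calls
// to be lowered as long calls, i.e. the callee address is materialized into a
// register and the call is made indirectly.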
extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0) {
      Base.Reg = 0;
    }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo)
    : FastISel(funcInfo, libInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
  private:
    unsigned FastEmitInst_(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC);
    unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            unsigned Op0, bool Op0IsKill);
    unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             unsigned Op1, bool Op1IsKill);
    unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              unsigned Op2, bool Op2IsKill);
    unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             uint64_t Imm);
    unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             const ConstantFP *FPImm);
    unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              uint64_t Imm);
    unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            uint64_t Imm);
    unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             uint64_t Imm1, uint64_t Imm2);

    unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                        unsigned Op0, bool Op0IsKill,
                                        uint32_t Idx);

    // Backend specific FastISel code.
  private:
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                     const LoadInst *LI);
    virtual bool FastLowerArguments();
  private:
  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);
    bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);

    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);
    bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                               unsigned Alignment);
    unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT);
    unsigned ARMMaterializeInt(const Constant *C, MVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT);
    unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(bool UseReg);
    unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT);

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
                                  bool Return,
                                  bool isVarArg);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes,
                         bool isVarArg);
    unsigned getLibcallReg(const Twine &Name);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes, bool isVarArg);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(MVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If we're a thumb2 or not NEON function we were handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
       AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine is predicable go ahead and add the predicate operands, if
// it needs default CC operands add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyways.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

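  // If the target instruction has an explicit def, emit the result directly
  // into ResultReg; otherwise the value lands in the instruction's first
  // implicit def and is copied out afterwards. The same pattern is used by
  // the other FastEmitInst_* helpers below.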
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return false;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
                                               &ARM::GPRRegClass;
    unsigned ImmReg = createResultReg(RC);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
                             (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  // Load from constant pool.  For now 32-bit only.
  if (VT != MVT::i32)
    return false;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();
  bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM);
  const TargetRegisterClass *RC = isThumb2 ?
    (const TargetRegisterClass*)&ARM::rGPRRegClass :
    (const TargetRegisterClass*)&ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);

  // FastISel TLS support on non-Darwin is broken, punt to SelectionDAG.
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
  if (!Subtarget->isTargetDarwin() && IsThreadLocal) return 0;

  // Use movw+movt when possible, it avoids constant pool entries.
  // Darwin targets don't support movt with Reloc::Static, see
  // ARMTargetLowering::LowerGlobalAddressDarwin.  Other targets only support
  // static movt relocations.
  if (Subtarget->useMovt() &&
      Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) {
    unsigned Opc;
    switch (RelocM) {
    case Reloc::PIC_:
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
      break;
    case Reloc::DynamicNoPIC:
      Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn;
      break;
    default:
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
      break;
    }
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg).addGlobalAddress(GV));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = TD.getPrefTypeAlignment(GV->getType());
    if (Align == 0) {
      // TODO: Figure out if this is correct.
      Align = TD.getTypeAllocSize(GV->getType());
    }

    if (Subtarget->isTargetELF() && RelocM == Reloc::PIC_)
      return ARMLowerPICELF(GV, Align, VT);

    // Grab index.
    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
      (Subtarget->isThumb() ? 4 : 8);
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
            .addConstantPoolIndex(Idx);
      if (RelocM == Reloc::PIC_)
        MIB.addImm(Id);
      AddOptionalDefs(MIB);
    } else {
      // The extra immediate is for addrmode2.
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                    DestReg)
            .addConstantPoolIndex(Idx)
            .addImm(0);
      AddOptionalDefs(MIB);

      if (RelocM == Reloc::PIC_) {
        unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
        unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));

        MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                          DL, TII.get(Opc), NewDestReg)
                                  .addReg(DestReg)
                                  .addImm(Id);
        AddOptionalDefs(MIB);
        return NewDestReg;
      }
    }
  }

  if (IsIndirect) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple()) return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    break;
    case Instruction::BitCast:
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    case Instruction::IntToPtr:
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::PtrToInt:
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = TD.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (isa<AddOperator>(Op) &&
                (!isa<Instruction>(Op) ||
                 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
                 == FuncInfo.MBB) &&
                isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
              // An add (in the same block) with a constant operand. Fold the
              // constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
  bool needsLowering = false;
  switch (VT.SimpleTy) {
    default: llvm_unreachable("Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ?
      (const TargetRegisterClass*)&ARM::tGPRRegClass :
      (const TargetRegisterClass*)&ARM::GPRRegClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset),
        Flags,
        MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
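      // addrmode3 uses an 8-bit offset magnitude plus an add/subtract flag;
      // bit 8 (0x100) of the immediate marks a subtracted (negative) offset.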
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        if (isZExt) {
          Opc = ARM::LDRBi12;
        } else {
          Opc = ARM::LDRSB;
          useAM3 = true;
        }
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned loads need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
1067 needVMOV = true;
1068 VT = MVT::i32;
1069 Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
JF Bastien1fe907e2013-06-09 00:20:24 +00001070 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
Chad Rosier8a9bce92011-12-13 19:22:14 +00001071 } else {
1072 Opc = ARM::VLDRS;
1073 RC = TLI.getRegClassFor(VT);
1074 }
Eric Christopher6dab1372010-09-18 01:59:37 +00001075 break;
1076 case MVT::f64:
Chad Rosier6762f8f2011-12-14 17:55:03 +00001077 if (!Subtarget->hasVFP2()) return false;
Chad Rosier404ed3c2011-12-14 17:26:05 +00001078 // FIXME: Unaligned loads need special handling. Doublewords require
1079 // word-alignment.
1080 if (Alignment && Alignment < 4)
Chad Rosier8a9bce92011-12-13 19:22:14 +00001081 return false;
Chad Rosier404ed3c2011-12-14 17:26:05 +00001082
Eric Christopher6dab1372010-09-18 01:59:37 +00001083 Opc = ARM::VLDRD;
Eric Christopheree56ea62010-10-07 05:50:44 +00001084 RC = TLI.getRegClassFor(VT);
Eric Christopher6dab1372010-09-18 01:59:37 +00001085 break;
Eric Christopherb1cc8482010-08-25 07:23:49 +00001086 }
Eric Christopher564857f2010-12-01 01:40:24 +00001087 // Simplify this down to something we can handle.
Chad Rosierb29b9502011-11-13 02:23:59 +00001088 ARMSimplifyAddress(Addr, VT, useAM3);
Jim Grosbach6b156392010-10-27 21:39:08 +00001089
Eric Christopher564857f2010-12-01 01:40:24 +00001090 // Create the base instruction, then add the operands.
Chad Rosierb29b9502011-11-13 02:23:59 +00001091 if (allocReg)
1092 ResultReg = createResultReg(RC);
1093 assert (ResultReg > 255 && "Expected an allocated virtual register.");
Eric Christopher564857f2010-12-01 01:40:24 +00001094 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1095 TII.get(Opc), ResultReg);
Chad Rosierb29b9502011-11-13 02:23:59 +00001096 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);
Chad Rosier8a9bce92011-12-13 19:22:14 +00001097
 1098 // If we had an unaligned load of a float we've converted it to a regular
 1099 // load. Now we must move from the GPR to the FP register.
1100 if (needVMOV) {
1101 unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
1102 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1103 TII.get(ARM::VMOVSR), MoveReg)
1104 .addReg(ResultReg));
1105 ResultReg = MoveReg;
1106 }
Eric Christopherdc908042010-08-31 01:28:42 +00001107 return true;
Eric Christopherb1cc8482010-08-25 07:23:49 +00001108}
1109
Eric Christopher43b62be2010-09-27 06:02:23 +00001110bool ARMFastISel::SelectLoad(const Instruction *I) {
Eli Friedman4136d232011-09-02 22:33:24 +00001111 // Atomic loads need special handling.
1112 if (cast<LoadInst>(I)->isAtomic())
1113 return false;
1114
Eric Christopherdb12b2b2010-09-10 00:34:35 +00001115 // Verify we have a legal type before going any further.
Duncan Sands1440e8b2010-11-03 11:35:31 +00001116 MVT VT;
Eric Christopherdb12b2b2010-09-10 00:34:35 +00001117 if (!isLoadTypeLegal(I->getType(), VT))
1118 return false;
1119
Eric Christopher564857f2010-12-01 01:40:24 +00001120 // See if we can handle this address.
Eric Christopher0d581222010-11-19 22:30:02 +00001121 Address Addr;
Eric Christopher564857f2010-12-01 01:40:24 +00001122 if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;
Eric Christopherdb12b2b2010-09-10 00:34:35 +00001123
1124 unsigned ResultReg;
Chad Rosier8a9bce92011-12-13 19:22:14 +00001125 if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
1126 return false;
Eric Christopherdb12b2b2010-09-10 00:34:35 +00001127 UpdateValueMap(I, ResultReg);
1128 return true;
1129}
1130
Patrik Hagglunda61b17c2012-12-13 06:34:11 +00001131bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
Bob Wilson6ce2dea2011-12-04 00:52:23 +00001132 unsigned Alignment) {
Eric Christopher318b6ee2010-09-02 00:53:56 +00001133 unsigned StrOpc;
Chad Rosierb29b9502011-11-13 02:23:59 +00001134 bool useAM3 = false;
Patrik Hagglunda61b17c2012-12-13 06:34:11 +00001135 switch (VT.SimpleTy) {
Eric Christopher564857f2010-12-01 01:40:24 +00001136 // This is mostly going to be Neon/vector support.
Eric Christopher318b6ee2010-09-02 00:53:56 +00001137 default: return false;
Eric Christopher4c914122010-11-02 23:59:09 +00001138 case MVT::i1: {
Craig Topper420761a2012-04-20 07:30:17 +00001139 unsigned Res = createResultReg(isThumb2 ?
1140 (const TargetRegisterClass*)&ARM::tGPRRegClass :
1141 (const TargetRegisterClass*)&ARM::GPRRegClass);
Chad Rosier66dc8ca2011-11-08 21:12:00 +00001142 unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
Eric Christopher4c914122010-11-02 23:59:09 +00001143 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1144 TII.get(Opc), Res)
1145 .addReg(SrcReg).addImm(1));
1146 SrcReg = Res;
1147 } // Fallthrough here.
Eric Christopher2896df82010-10-15 18:02:07 +00001148 case MVT::i8:
Chad Rosier57b29972011-11-14 20:22:27 +00001149 if (isThumb2) {
1150 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1151 StrOpc = ARM::t2STRBi8;
1152 else
1153 StrOpc = ARM::t2STRBi12;
1154 } else {
1155 StrOpc = ARM::STRBi12;
1156 }
Eric Christopher15418772010-10-12 05:39:06 +00001157 break;
1158 case MVT::i16:
Chad Rosierb3235b12012-11-09 18:25:27 +00001159 if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
Chad Rosierd70c98e2012-09-21 00:41:42 +00001160 return false;
1161
Chad Rosier57b29972011-11-14 20:22:27 +00001162 if (isThumb2) {
1163 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1164 StrOpc = ARM::t2STRHi8;
1165 else
1166 StrOpc = ARM::t2STRHi12;
1167 } else {
1168 StrOpc = ARM::STRH;
1169 useAM3 = true;
1170 }
Eric Christopher15418772010-10-12 05:39:06 +00001171 break;
Eric Christopher47650ec2010-10-16 01:10:35 +00001172 case MVT::i32:
Chad Rosierb3235b12012-11-09 18:25:27 +00001173 if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
Chad Rosiere5e674b2012-09-21 16:58:35 +00001174 return false;
1175
Chad Rosier57b29972011-11-14 20:22:27 +00001176 if (isThumb2) {
1177 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1178 StrOpc = ARM::t2STRi8;
1179 else
1180 StrOpc = ARM::t2STRi12;
1181 } else {
1182 StrOpc = ARM::STRi12;
1183 }
Eric Christopher47650ec2010-10-16 01:10:35 +00001184 break;
Eric Christopher56d2b722010-09-02 23:43:26 +00001185 case MVT::f32:
1186 if (!Subtarget->hasVFP2()) return false;
Chad Rosiered42c5f2011-12-06 01:44:17 +00001187 // Unaligned stores need special handling. Floats require word-alignment.
Chad Rosier9eff1e32011-12-03 02:21:57 +00001188 if (Alignment && Alignment < 4) {
1189 unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
1190 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1191 TII.get(ARM::VMOVRS), MoveReg)
1192 .addReg(SrcReg));
1193 SrcReg = MoveReg;
1194 VT = MVT::i32;
1195 StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
Chad Rosier64ac91b2011-12-14 17:32:02 +00001196 } else {
1197 StrOpc = ARM::VSTRS;
Chad Rosier9eff1e32011-12-03 02:21:57 +00001198 }
Eric Christopher56d2b722010-09-02 23:43:26 +00001199 break;
1200 case MVT::f64:
1201 if (!Subtarget->hasVFP2()) return false;
Chad Rosiered42c5f2011-12-06 01:44:17 +00001202 // FIXME: Unaligned stores need special handling. Doublewords require
1203 // word-alignment.
Chad Rosier404ed3c2011-12-14 17:26:05 +00001204 if (Alignment && Alignment < 4)
Chad Rosier9eff1e32011-12-03 02:21:57 +00001205 return false;
Chad Rosier404ed3c2011-12-14 17:26:05 +00001206
Eric Christopher56d2b722010-09-02 23:43:26 +00001207 StrOpc = ARM::VSTRD;
1208 break;
Eric Christopher318b6ee2010-09-02 00:53:56 +00001209 }
Eric Christopher564857f2010-12-01 01:40:24 +00001210 // Simplify this down to something we can handle.
Chad Rosierb29b9502011-11-13 02:23:59 +00001211 ARMSimplifyAddress(Addr, VT, useAM3);
Jim Grosbach6b156392010-10-27 21:39:08 +00001212
Eric Christopher564857f2010-12-01 01:40:24 +00001213 // Create the base instruction, then add the operands.
1214 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1215 TII.get(StrOpc))
Chad Rosier3bdb3c92011-11-17 01:16:53 +00001216 .addReg(SrcReg);
Chad Rosierb29b9502011-11-13 02:23:59 +00001217 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
Eric Christopher318b6ee2010-09-02 00:53:56 +00001218 return true;
1219}
1220
Eric Christopher43b62be2010-09-27 06:02:23 +00001221bool ARMFastISel::SelectStore(const Instruction *I) {
Eric Christopher318b6ee2010-09-02 00:53:56 +00001222 Value *Op0 = I->getOperand(0);
1223 unsigned SrcReg = 0;
1224
Eli Friedman4136d232011-09-02 22:33:24 +00001225 // Atomic stores need special handling.
1226 if (cast<StoreInst>(I)->isAtomic())
1227 return false;
1228
Eric Christopher564857f2010-12-01 01:40:24 +00001229 // Verify we have a legal type before going any further.
Duncan Sands1440e8b2010-11-03 11:35:31 +00001230 MVT VT;
Eric Christopher318b6ee2010-09-02 00:53:56 +00001231 if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
Eric Christopher543cf052010-09-01 22:16:27 +00001232 return false;
Eric Christopher318b6ee2010-09-02 00:53:56 +00001233
Eric Christopher1b61ef42010-09-02 01:48:11 +00001234 // Get the value to be stored into a register.
1235 SrcReg = getRegForValue(Op0);
Eric Christopher564857f2010-12-01 01:40:24 +00001236 if (SrcReg == 0) return false;
Eric Christopherac1a19e2010-09-09 01:06:51 +00001237
Eric Christopher564857f2010-12-01 01:40:24 +00001238 // See if we can handle this address.
Eric Christopher0d581222010-11-19 22:30:02 +00001239 Address Addr;
Eric Christopher0d581222010-11-19 22:30:02 +00001240 if (!ARMComputeAddress(I->getOperand(1), Addr))
Eric Christopher318b6ee2010-09-02 00:53:56 +00001241 return false;
Eric Christopherac1a19e2010-09-09 01:06:51 +00001242
Chad Rosier9eff1e32011-12-03 02:21:57 +00001243 if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
1244 return false;
Eric Christophera5b1e682010-09-17 22:28:18 +00001245 return true;
1246}
1247
1248static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
1249 switch (Pred) {
1250 // Needs two compares...
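  // FCMP_ONE is true iff OGT or OLT, and FCMP_UEQ iff OEQ or UNO, so no
  // single ARM condition code covers them; returning AL below signals that.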
1251 case CmpInst::FCMP_ONE:
Eric Christopherdccd2c32010-10-11 08:38:55 +00001252 case CmpInst::FCMP_UEQ:
Eric Christophera5b1e682010-09-17 22:28:18 +00001253 default:
Eric Christopher4053e632010-11-02 01:24:49 +00001254 // AL is our "false" for now. The other two need more compares.
Eric Christophera5b1e682010-09-17 22:28:18 +00001255 return ARMCC::AL;
1256 case CmpInst::ICMP_EQ:
1257 case CmpInst::FCMP_OEQ:
1258 return ARMCC::EQ;
1259 case CmpInst::ICMP_SGT:
1260 case CmpInst::FCMP_OGT:
1261 return ARMCC::GT;
1262 case CmpInst::ICMP_SGE:
1263 case CmpInst::FCMP_OGE:
1264 return ARMCC::GE;
1265 case CmpInst::ICMP_UGT:
1266 case CmpInst::FCMP_UGT:
1267 return ARMCC::HI;
1268 case CmpInst::FCMP_OLT:
1269 return ARMCC::MI;
1270 case CmpInst::ICMP_ULE:
1271 case CmpInst::FCMP_OLE:
1272 return ARMCC::LS;
1273 case CmpInst::FCMP_ORD:
1274 return ARMCC::VC;
1275 case CmpInst::FCMP_UNO:
1276 return ARMCC::VS;
1277 case CmpInst::FCMP_UGE:
1278 return ARMCC::PL;
1279 case CmpInst::ICMP_SLT:
1280 case CmpInst::FCMP_ULT:
Eric Christopherdccd2c32010-10-11 08:38:55 +00001281 return ARMCC::LT;
Eric Christophera5b1e682010-09-17 22:28:18 +00001282 case CmpInst::ICMP_SLE:
1283 case CmpInst::FCMP_ULE:
1284 return ARMCC::LE;
1285 case CmpInst::FCMP_UNE:
1286 case CmpInst::ICMP_NE:
1287 return ARMCC::NE;
1288 case CmpInst::ICMP_UGE:
1289 return ARMCC::HS;
1290 case CmpInst::ICMP_ULT:
1291 return ARMCC::LO;
1292 }
Eric Christopher543cf052010-09-01 22:16:27 +00001293}
1294
Eric Christopher43b62be2010-09-27 06:02:23 +00001295bool ARMFastISel::SelectBranch(const Instruction *I) {
Eric Christophere5734102010-09-03 00:35:47 +00001296 const BranchInst *BI = cast<BranchInst>(I);
1297 MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
1298 MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
Eric Christopherac1a19e2010-09-09 01:06:51 +00001299
Eric Christophere5734102010-09-03 00:35:47 +00001300 // Simple branch support.
Jim Grosbach16cb3762010-11-09 19:22:26 +00001301
Eric Christopher0e6233b2010-10-29 21:08:19 +00001302 // If we can, avoid recomputing the compare - redoing it could lead to wonky
1303 // behavior.
Eric Christopher0e6233b2010-10-29 21:08:19 +00001304 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
Chad Rosier75698f32011-10-26 23:17:28 +00001305 if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
Eric Christopher0e6233b2010-10-29 21:08:19 +00001306
1307 // Get the compare predicate.
Eric Christopher632ae892011-04-29 21:56:31 +00001308 // Try to take advantage of fallthrough opportunities.
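      // i.e. if the true block is the fall-through successor, branch to the
      // false block on the inverse condition and let the true block fall
      // through.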
1309 CmpInst::Predicate Predicate = CI->getPredicate();
1310 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1311 std::swap(TBB, FBB);
1312 Predicate = CmpInst::getInversePredicate(Predicate);
1313 }
1314
1315 ARMCC::CondCodes ARMPred = getComparePred(Predicate);
Eric Christopher0e6233b2010-10-29 21:08:19 +00001316
1317 // We may not handle every CC for now.
1318 if (ARMPred == ARMCC::AL) return false;
1319
Chad Rosier75698f32011-10-26 23:17:28 +00001320 // Emit the compare.
Chad Rosiere07cd5e2011-11-02 18:08:25 +00001321 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
Chad Rosier75698f32011-10-26 23:17:28 +00001322 return false;
Jim Grosbach16cb3762010-11-09 19:22:26 +00001323
Chad Rosier66dc8ca2011-11-08 21:12:00 +00001324 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
Eric Christopher0e6233b2010-10-29 21:08:19 +00001325 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
1326 .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
1327 FastEmitBranch(FBB, DL);
1328 FuncInfo.MBB->addSuccessor(TBB);
1329 return true;
1330 }
Eric Christopherbcf26ae2011-04-29 20:02:39 +00001331 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
1332 MVT SourceVT;
1333 if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
Eli Friedman76927d732011-05-25 23:49:02 +00001334 (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
Chad Rosier66dc8ca2011-11-08 21:12:00 +00001335 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
Eric Christopherbcf26ae2011-04-29 20:02:39 +00001336 unsigned OpReg = getRegForValue(TI->getOperand(0));
1337 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1338 TII.get(TstOpc))
1339 .addReg(OpReg).addImm(1));
1340
1341 unsigned CCMode = ARMCC::NE;
1342 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1343 std::swap(TBB, FBB);
1344 CCMode = ARMCC::EQ;
1345 }
1346
Chad Rosier66dc8ca2011-11-08 21:12:00 +00001347 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
Eric Christopherbcf26ae2011-04-29 20:02:39 +00001348 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
1349 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
1350
1351 FastEmitBranch(FBB, DL);
1352 FuncInfo.MBB->addSuccessor(TBB);
1353 return true;
1354 }
Chad Rosier6d64b3a2011-10-27 00:21:16 +00001355 } else if (const ConstantInt *CI =
1356 dyn_cast<ConstantInt>(BI->getCondition())) {
1357 uint64_t Imm = CI->getZExtValue();
1358 MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
1359 FastEmitBranch(Target, DL);
1360 return true;
Eric Christopher0e6233b2010-10-29 21:08:19 +00001361 }
Jim Grosbach16cb3762010-11-09 19:22:26 +00001362
Eric Christopher0e6233b2010-10-29 21:08:19 +00001363 unsigned CmpReg = getRegForValue(BI->getCondition());
1364 if (CmpReg == 0) return false;
Eric Christopherac1a19e2010-09-09 01:06:51 +00001365
Stuart Hastingsc5eecbc2011-04-16 03:31:26 +00001366 // We've been divorced from our compare! Our block was split, and
 1367 // now our compare lives in a predecessor block. We mustn't
1368 // re-compare here, as the children of the compare aren't guaranteed
1369 // live across the block boundary (we *could* check for this).
1370 // Regardless, the compare has been done in the predecessor block,
1371 // and it left a value for us in a virtual register. Ergo, we test
1372 // the one-bit value left in the virtual register.
Chad Rosier66dc8ca2011-11-08 21:12:00 +00001373 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
Stuart Hastingsc5eecbc2011-04-16 03:31:26 +00001374 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
1375 .addReg(CmpReg).addImm(1));
Eric Christopherdccd2c32010-10-11 08:38:55 +00001376
Eric Christopher7a20a372011-04-28 16:52:09 +00001377 unsigned CCMode = ARMCC::NE;
1378 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1379 std::swap(TBB, FBB);
1380 CCMode = ARMCC::EQ;
1381 }
1382
Chad Rosier66dc8ca2011-11-08 21:12:00 +00001383 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
Eric Christophere5734102010-09-03 00:35:47 +00001384 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
Eric Christopher7a20a372011-04-28 16:52:09 +00001385 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
Eric Christophere5734102010-09-03 00:35:47 +00001386 FastEmitBranch(FBB, DL);
1387 FuncInfo.MBB->addSuccessor(TBB);
Eric Christopherdccd2c32010-10-11 08:38:55 +00001388 return true;
Eric Christophere5734102010-09-03 00:35:47 +00001389}
1390
Chad Rosier60c8fa62012-02-07 23:56:08 +00001391bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
1392 unsigned AddrReg = getRegForValue(I->getOperand(0));
1393 if (AddrReg == 0) return false;
1394
1395 unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
1396 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
1397 .addReg(AddrReg));
Bill Wendling8f47fc82012-10-22 23:30:04 +00001398
1399 const IndirectBrInst *IB = cast<IndirectBrInst>(I);
1400 for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
1401 FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[IB->getSuccessor(i)]);
1402
Jush Luefc967e2012-06-14 06:08:19 +00001403 return true;
Chad Rosier60c8fa62012-02-07 23:56:08 +00001404}
1405
Chad Rosiere07cd5e2011-11-02 18:08:25 +00001406bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
1407 bool isZExt) {
Chad Rosierade62002011-10-26 23:25:44 +00001408 Type *Ty = Src1Value->getType();
Patrik Hagglund3d170e62012-12-17 14:30:06 +00001409 EVT SrcEVT = TLI.getValueType(Ty, true);
1410 if (!SrcEVT.isSimple()) return false;
1411 MVT SrcVT = SrcEVT.getSimpleVT();
Eric Christopherac1a19e2010-09-09 01:06:51 +00001412
Chad Rosierade62002011-10-26 23:25:44 +00001413 bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
1414 if (isFloat && !Subtarget->hasVFP2())
Eric Christopherd43393a2010-09-08 23:13:45 +00001415 return false;
Eric Christopherac1a19e2010-09-09 01:06:51 +00001416
Chad Rosier2f2fe412011-11-09 03:22:02 +00001417 // Check to see if the 2nd operand is a constant that we can encode directly
1418 // in the compare.
Chad Rosier1c47de82011-11-11 06:27:41 +00001419 int Imm = 0;
1420 bool UseImm = false;
Chad Rosier2f2fe412011-11-09 03:22:02 +00001421 bool isNegativeImm = false;
Chad Rosierf56c60b2011-11-16 00:32:20 +00001422 // FIXME: At -O0 we don't have anything that canonicalizes operand order.
1423 // Thus, Src1Value may be a ConstantInt, but we're missing it.
Chad Rosier2f2fe412011-11-09 03:22:02 +00001424 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
1425 if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
1426 SrcVT == MVT::i1) {
1427 const APInt &CIVal = ConstInt->getValue();
Chad Rosier1c47de82011-11-11 06:27:41 +00001428 Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
Chad Rosier0ac754f2012-03-15 22:54:20 +00001429 // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
 1430 // than a cmn, because there is no way to represent 2147483648 as a
1431 // signed 32-bit int.
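      // For other negative values we negate the immediate and use CMN, which
      // sets flags for (Rn + Imm); e.g. 'cmp r0, #-42' becomes 'cmn r0, #42'.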
1432 if (Imm < 0 && Imm != (int)0x80000000) {
1433 isNegativeImm = true;
1434 Imm = -Imm;
Chad Rosier6cba97c2011-11-10 01:30:39 +00001435 }
Chad Rosier0ac754f2012-03-15 22:54:20 +00001436 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
1437 (ARM_AM::getSOImmVal(Imm) != -1);
Chad Rosier2f2fe412011-11-09 03:22:02 +00001438 }
1439 } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
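    // Only +0.0 can be folded here: the immediate compare forms used below
    // (VCMPEZS/VCMPEZD) compare against an implicit +0.0.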
1440 if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
1441 if (ConstFP->isZero() && !ConstFP->isNegative())
Chad Rosier1c47de82011-11-11 06:27:41 +00001442 UseImm = true;
Chad Rosier2f2fe412011-11-09 03:22:02 +00001443 }
1444
Eric Christopherd43393a2010-09-08 23:13:45 +00001445 unsigned CmpOpc;
Chad Rosier2f2fe412011-11-09 03:22:02 +00001446 bool isICmp = true;
Chad Rosiere07cd5e2011-11-02 18:08:25 +00001447 bool needsExt = false;
Patrik Hagglunda61b17c2012-12-13 06:34:11 +00001448 switch (SrcVT.SimpleTy) {
Eric Christopherd43393a2010-09-08 23:13:45 +00001449 default: return false;
1450 // TODO: Verify compares.
1451 case MVT::f32:
Chad Rosier2f2fe412011-11-09 03:22:02 +00001452 isICmp = false;
Chad Rosier1c47de82011-11-11 06:27:41 +00001453 CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
Eric Christopherd43393a2010-09-08 23:13:45 +00001454 break;
1455 case MVT::f64:
Chad Rosier2f2fe412011-11-09 03:22:02 +00001456 isICmp = false;
Chad Rosier1c47de82011-11-11 06:27:41 +00001457 CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
Eric Christopherd43393a2010-09-08 23:13:45 +00001458 break;
Chad Rosiere07cd5e2011-11-02 18:08:25 +00001459 case MVT::i1:
1460 case MVT::i8:
1461 case MVT::i16:
1462 needsExt = true;
1463 // Intentional fall-through.
Eric Christopherd43393a2010-09-08 23:13:45 +00001464 case MVT::i32:
Chad Rosier2f2fe412011-11-09 03:22:02 +00001465 if (isThumb2) {
Chad Rosier1c47de82011-11-11 06:27:41 +00001466 if (!UseImm)
Chad Rosier2f2fe412011-11-09 03:22:02 +00001467 CmpOpc = ARM::t2CMPrr;
1468 else
Bill Wendlingad5c8802012-06-11 08:07:26 +00001469 CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
Chad Rosier2f2fe412011-11-09 03:22:02 +00001470 } else {
Chad Rosier1c47de82011-11-11 06:27:41 +00001471 if (!UseImm)
Chad Rosier2f2fe412011-11-09 03:22:02 +00001472 CmpOpc = ARM::CMPrr;
1473 else
Bill Wendlingad5c8802012-06-11 08:07:26 +00001474 CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
Chad Rosier2f2fe412011-11-09 03:22:02 +00001475 }
Eric Christopherd43393a2010-09-08 23:13:45 +00001476 break;
1477 }
1478
Chad Rosiere07cd5e2011-11-02 18:08:25 +00001479 unsigned SrcReg1 = getRegForValue(Src1Value);
1480 if (SrcReg1 == 0) return false;
Chad Rosier530f7ce2011-10-26 22:47:55 +00001481
Duncan Sands4c0c5452011-11-28 10:31:27 +00001482 unsigned SrcReg2 = 0;
Chad Rosier1c47de82011-11-11 06:27:41 +00001483 if (!UseImm) {
Chad Rosier2f2fe412011-11-09 03:22:02 +00001484 SrcReg2 = getRegForValue(Src2Value);
1485 if (SrcReg2 == 0) return false;
1486 }
Chad Rosiere07cd5e2011-11-02 18:08:25 +00001487
1488 // We have i1, i8, or i16, we need to either zero extend or sign extend.
1489 if (needsExt) {
Chad Rosiera69feb02012-02-16 22:45:33 +00001490 SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
1491 if (SrcReg1 == 0) return false;
Chad Rosier1c47de82011-11-11 06:27:41 +00001492 if (!UseImm) {
Chad Rosiera69feb02012-02-16 22:45:33 +00001493 SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
1494 if (SrcReg2 == 0) return false;
Chad Rosier2f2fe412011-11-09 03:22:02 +00001495 }
Chad Rosiere07cd5e2011-11-02 18:08:25 +00001496 }
Chad Rosier530f7ce2011-10-26 22:47:55 +00001497
Chad Rosier1c47de82011-11-11 06:27:41 +00001498 if (!UseImm) {
Chad Rosier2f2fe412011-11-09 03:22:02 +00001499 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1500 TII.get(CmpOpc))
1501 .addReg(SrcReg1).addReg(SrcReg2));
1502 } else {
1503 MachineInstrBuilder MIB;
1504 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
1505 .addReg(SrcReg1);
1506
1507 // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
1508 if (isICmp)
Chad Rosier1c47de82011-11-11 06:27:41 +00001509 MIB.addImm(Imm);
Chad Rosier2f2fe412011-11-09 03:22:02 +00001510 AddOptionalDefs(MIB);
1511 }
Chad Rosierade62002011-10-26 23:25:44 +00001512
1513 // For floating point we need to move the result to a comparison register
1514 // that we can then use for branches.
1515 if (Ty->isFloatTy() || Ty->isDoubleTy())
1516 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1517 TII.get(ARM::FMSTAT)));
Chad Rosier530f7ce2011-10-26 22:47:55 +00001518 return true;
1519}
1520
1521bool ARMFastISel::SelectCmp(const Instruction *I) {
1522 const CmpInst *CI = cast<CmpInst>(I);
1523
Eric Christopher229207a2010-09-29 01:14:47 +00001524 // Get the compare predicate.
1525 ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());
Eric Christopherdccd2c32010-10-11 08:38:55 +00001526
Eric Christopher229207a2010-09-29 01:14:47 +00001527 // We may not handle every CC for now.
1528 if (ARMPred == ARMCC::AL) return false;
1529
Chad Rosier530f7ce2011-10-26 22:47:55 +00001530 // Emit the compare.
Chad Rosiere07cd5e2011-11-02 18:08:25 +00001531 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
Chad Rosier530f7ce2011-10-26 22:47:55 +00001532 return false;
Eric Christopherac1a19e2010-09-09 01:06:51 +00001533
Eric Christopher229207a2010-09-29 01:14:47 +00001534 // Now set a register based on the comparison. Explicitly set the predicates
1535 // here.
Chad Rosier66dc8ca2011-11-08 21:12:00 +00001536 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
Craig Topper420761a2012-04-20 07:30:17 +00001537 const TargetRegisterClass *RC = isThumb2 ?
1538 (const TargetRegisterClass*)&ARM::rGPRRegClass :
1539 (const TargetRegisterClass*)&ARM::GPRRegClass;
Eric Christopher5d18d922010-10-07 05:39:19 +00001540 unsigned DestReg = createResultReg(RC);
Chad Rosierade62002011-10-26 23:25:44 +00001541 Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
Eric Christopher229207a2010-09-29 01:14:47 +00001542 unsigned ZeroReg = TargetMaterializeConstant(Zero);
Chad Rosier44c98b72012-03-07 20:59:26 +00001543 // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
Eric Christopher229207a2010-09-29 01:14:47 +00001544 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
1545 .addReg(ZeroReg).addImm(1)
Chad Rosier44c98b72012-03-07 20:59:26 +00001546 .addImm(ARMPred).addReg(ARM::CPSR);
Eric Christopher229207a2010-09-29 01:14:47 +00001547
Eric Christophera5b1e682010-09-17 22:28:18 +00001548 UpdateValueMap(I, DestReg);
Eric Christopherd43393a2010-09-08 23:13:45 +00001549 return true;
1550}
1551
Eric Christopher43b62be2010-09-27 06:02:23 +00001552bool ARMFastISel::SelectFPExt(const Instruction *I) {
Eric Christopher46203602010-09-09 00:26:48 +00001553 // Make sure we have VFP and that we're extending float to double.
1554 if (!Subtarget->hasVFP2()) return false;
Eric Christopherac1a19e2010-09-09 01:06:51 +00001555
Eric Christopher46203602010-09-09 00:26:48 +00001556 Value *V = I->getOperand(0);
1557 if (!I->getType()->isDoubleTy() ||
1558 !V->getType()->isFloatTy()) return false;
Eric Christopherac1a19e2010-09-09 01:06:51 +00001559
Eric Christopher46203602010-09-09 00:26:48 +00001560 unsigned Op = getRegForValue(V);
1561 if (Op == 0) return false;
Eric Christopherac1a19e2010-09-09 01:06:51 +00001562
Craig Topper420761a2012-04-20 07:30:17 +00001563 unsigned Result = createResultReg(&ARM::DPRRegClass);
Eric Christopherac1a19e2010-09-09 01:06:51 +00001564 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
Eric Christopheref2fdd22010-09-09 20:36:19 +00001565 TII.get(ARM::VCVTDS), Result)
Eric Christopherce07b542010-09-09 20:26:31 +00001566 .addReg(Op));
1567 UpdateValueMap(I, Result);
1568 return true;
1569}
1570
Eric Christopher43b62be2010-09-27 06:02:23 +00001571bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
Eric Christopherce07b542010-09-09 20:26:31 +00001572 // Make sure we have VFP and that we're truncating double to float.
1573 if (!Subtarget->hasVFP2()) return false;
1574
1575 Value *V = I->getOperand(0);
Eric Christopher022b7fb2010-10-05 23:13:24 +00001576 if (!(I->getType()->isFloatTy() &&
1577 V->getType()->isDoubleTy())) return false;
Eric Christopherce07b542010-09-09 20:26:31 +00001578
1579 unsigned Op = getRegForValue(V);
1580 if (Op == 0) return false;
1581
Craig Topper420761a2012-04-20 07:30:17 +00001582 unsigned Result = createResultReg(&ARM::SPRRegClass);
Eric Christopherce07b542010-09-09 20:26:31 +00001583 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
Eric Christopheref2fdd22010-09-09 20:36:19 +00001584 TII.get(ARM::VCVTSD), Result)
Eric Christopher46203602010-09-09 00:26:48 +00001585 .addReg(Op));
1586 UpdateValueMap(I, Result);
1587 return true;
1588}
1589
Chad Rosierae46a332012-02-03 21:14:11 +00001590bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
Eric Christopher9a040492010-09-09 18:54:59 +00001591 // Make sure we have VFP.
1592 if (!Subtarget->hasVFP2()) return false;
Eric Christopherdccd2c32010-10-11 08:38:55 +00001593
Duncan Sands1440e8b2010-11-03 11:35:31 +00001594 MVT DstVT;
Chris Lattnerdb125cf2011-07-18 04:54:35 +00001595 Type *Ty = I->getType();
Eric Christopher9ee4ce22010-09-09 21:44:45 +00001596 if (!isTypeLegal(Ty, DstVT))
Eric Christopher9a040492010-09-09 18:54:59 +00001597 return false;
Eric Christopherdccd2c32010-10-11 08:38:55 +00001598
Chad Rosier463fe242011-11-03 02:04:59 +00001599 Value *Src = I->getOperand(0);
Patrik Hagglund3d170e62012-12-17 14:30:06 +00001600 EVT SrcEVT = TLI.getValueType(Src->getType(), true);
1601 if (!SrcEVT.isSimple())
1602 return false;
1603 MVT SrcVT = SrcEVT.getSimpleVT();
Chad Rosier463fe242011-11-03 02:04:59 +00001604 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
Eli Friedman783c6642011-05-25 19:09:45 +00001605 return false;
1606
Chad Rosier463fe242011-11-03 02:04:59 +00001607 unsigned SrcReg = getRegForValue(Src);
1608 if (SrcReg == 0) return false;
1609
1610 // Handle sign-extension.
1611 if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
Chad Rosier316a5aa2012-12-17 19:59:43 +00001612 SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,
Chad Rosierae46a332012-02-03 21:14:11 +00001613 /*isZExt*/!isSigned);
Chad Rosiera69feb02012-02-16 22:45:33 +00001614 if (SrcReg == 0) return false;
Chad Rosier463fe242011-11-03 02:04:59 +00001615 }
Eric Christopherdccd2c32010-10-11 08:38:55 +00001616
Eric Christopherdb12b2b2010-09-10 00:34:35 +00001617 // The conversion routine works on fp-reg to fp-reg and the operand above
 1618 // was an integer; move it to the fp registers if possible.
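  // e.g. an i32 source is first moved into an S register and then converted
  // in place by the VSITOS/VUITOS (or VSITOD/VUITOD) instruction chosen below.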
Chad Rosier463fe242011-11-03 02:04:59 +00001619 unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
Eric Christopher9ee4ce22010-09-09 21:44:45 +00001620 if (FP == 0) return false;
Eric Christopherdccd2c32010-10-11 08:38:55 +00001621
Eric Christopher9a040492010-09-09 18:54:59 +00001622 unsigned Opc;
Chad Rosierae46a332012-02-03 21:14:11 +00001623 if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
1624 else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
Chad Rosierdd1e7512011-08-31 23:49:05 +00001625 else return false;
Eric Christopherdccd2c32010-10-11 08:38:55 +00001626
Eric Christopher9ee4ce22010-09-09 21:44:45 +00001627 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
Eric Christopher9a040492010-09-09 18:54:59 +00001628 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
1629 ResultReg)
Eric Christopher9ee4ce22010-09-09 21:44:45 +00001630 .addReg(FP));
Eric Christopherce07b542010-09-09 20:26:31 +00001631 UpdateValueMap(I, ResultReg);
Eric Christopher9a040492010-09-09 18:54:59 +00001632 return true;
1633}
1634
Chad Rosierae46a332012-02-03 21:14:11 +00001635bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
Eric Christopher9a040492010-09-09 18:54:59 +00001636 // Make sure we have VFP.
1637 if (!Subtarget->hasVFP2()) return false;
Eric Christopherdccd2c32010-10-11 08:38:55 +00001638
Duncan Sands1440e8b2010-11-03 11:35:31 +00001639 MVT DstVT;
Chris Lattnerdb125cf2011-07-18 04:54:35 +00001640 Type *RetTy = I->getType();
Eric Christopher920a2082010-09-10 00:35:09 +00001641 if (!isTypeLegal(RetTy, DstVT))
Eric Christopher9a040492010-09-09 18:54:59 +00001642 return false;
Eric Christopherdccd2c32010-10-11 08:38:55 +00001643
Eric Christopher9a040492010-09-09 18:54:59 +00001644 unsigned Op = getRegForValue(I->getOperand(0));
1645 if (Op == 0) return false;
Eric Christopherdccd2c32010-10-11 08:38:55 +00001646
Eric Christopher9a040492010-09-09 18:54:59 +00001647 unsigned Opc;
Chris Lattnerdb125cf2011-07-18 04:54:35 +00001648 Type *OpTy = I->getOperand(0)->getType();
Chad Rosierae46a332012-02-03 21:14:11 +00001649 if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
1650 else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
Chad Rosierdd1e7512011-08-31 23:49:05 +00001651 else return false;
Eric Christopherdccd2c32010-10-11 08:38:55 +00001652
Chad Rosieree8901c2012-02-03 20:27:51 +00001653 // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
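  // The VTOSIZ*/VTOUIZ* instructions write their integer result into an S
  // register; it is moved over to a GPR afterwards via ARMMoveToIntReg.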
Eric Christopher022b7fb2010-10-05 23:13:24 +00001654 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
Eric Christopher9a040492010-09-09 18:54:59 +00001655 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
1656 ResultReg)
1657 .addReg(Op));
Eric Christopherdccd2c32010-10-11 08:38:55 +00001658
Eric Christopher9ee4ce22010-09-09 21:44:45 +00001659 // This result needs to be in an integer register, but the conversion only
1660 // takes place in fp-regs.
Eric Christopherdb12b2b2010-09-10 00:34:35 +00001661 unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
Eric Christopher9ee4ce22010-09-09 21:44:45 +00001662 if (IntReg == 0) return false;
Eric Christopherdccd2c32010-10-11 08:38:55 +00001663
Eric Christopher9ee4ce22010-09-09 21:44:45 +00001664 UpdateValueMap(I, IntReg);
Eric Christopher9a040492010-09-09 18:54:59 +00001665 return true;
1666}
1667
Eric Christopher3bbd3962010-10-11 08:27:59 +00001668bool ARMFastISel::SelectSelect(const Instruction *I) {
Duncan Sands1440e8b2010-11-03 11:35:31 +00001669 MVT VT;
1670 if (!isTypeLegal(I->getType(), VT))
Eric Christopher3bbd3962010-10-11 08:27:59 +00001671 return false;
1672
1673 // Things need to be register sized for register moves.
Duncan Sands1440e8b2010-11-03 11:35:31 +00001674 if (VT != MVT::i32) return false;
Eric Christopher3bbd3962010-10-11 08:27:59 +00001675
1676 unsigned CondReg = getRegForValue(I->getOperand(0));
1677 if (CondReg == 0) return false;
1678 unsigned Op1Reg = getRegForValue(I->getOperand(1));
1679 if (Op1Reg == 0) return false;
Eric Christopher3bbd3962010-10-11 08:27:59 +00001680
Chad Rosiera07d3fc2011-11-11 06:20:39 +00001681 // Check to see if we can use an immediate in the conditional move.
1682 int Imm = 0;
1683 bool UseImm = false;
1684 bool isNegativeImm = false;
1685 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
1686 assert (VT == MVT::i32 && "Expecting an i32.");
1687 Imm = (int)ConstInt->getValue().getZExtValue();
1688 if (Imm < 0) {
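      // Negative constants are materialized with MVNCC below, which moves the
      // bitwise complement of its immediate; e.g. -5 is stored as ~(-5) == 4.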
1689 isNegativeImm = true;
1690 Imm = ~Imm;
1691 }
1692 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
1693 (ARM_AM::getSOImmVal(Imm) != -1);
1694 }
1695
Duncan Sands4c0c5452011-11-28 10:31:27 +00001696 unsigned Op2Reg = 0;
Chad Rosiera07d3fc2011-11-11 06:20:39 +00001697 if (!UseImm) {
1698 Op2Reg = getRegForValue(I->getOperand(2));
1699 if (Op2Reg == 0) return false;
1700 }
1701
1702 unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
Eric Christopher3bbd3962010-10-11 08:27:59 +00001703 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
Chad Rosiera07d3fc2011-11-11 06:20:39 +00001704 .addReg(CondReg).addImm(0));
1705
1706 unsigned MovCCOpc;
Chad Rosierac3158b2012-11-27 21:46:46 +00001707 const TargetRegisterClass *RC;
Chad Rosiera07d3fc2011-11-11 06:20:39 +00001708 if (!UseImm) {
Chad Rosierac3158b2012-11-27 21:46:46 +00001709 RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
Chad Rosiera07d3fc2011-11-11 06:20:39 +00001710 MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
1711 } else {
Chad Rosierac3158b2012-11-27 21:46:46 +00001712 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
1713 if (!isNegativeImm)
Chad Rosiera07d3fc2011-11-11 06:20:39 +00001714 MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
Chad Rosierac3158b2012-11-27 21:46:46 +00001715 else
Chad Rosiera07d3fc2011-11-11 06:20:39 +00001716 MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
Chad Rosiera07d3fc2011-11-11 06:20:39 +00001717 }
Eric Christopher3bbd3962010-10-11 08:27:59 +00001718 unsigned ResultReg = createResultReg(RC);
Chad Rosiera07d3fc2011-11-11 06:20:39 +00001719 if (!UseImm)
1720 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
1721 .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
1722 else
1723 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
1724 .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
Eric Christopher3bbd3962010-10-11 08:27:59 +00001725 UpdateValueMap(I, ResultReg);
1726 return true;
1727}
1728
Chad Rosier7ccb30b2012-02-03 21:07:27 +00001729bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
Duncan Sands1440e8b2010-11-03 11:35:31 +00001730 MVT VT;
Chris Lattnerdb125cf2011-07-18 04:54:35 +00001731 Type *Ty = I->getType();
Eric Christopher08637852010-09-30 22:34:19 +00001732 if (!isTypeLegal(Ty, VT))
1733 return false;
1734
1735 // If we have integer div support we should have selected this automagically.
1736 // In case we have a real miss go ahead and return false and we'll pick
1737 // it up later.
Eric Christopherdccd2c32010-10-11 08:38:55 +00001738 if (Subtarget->hasDivide()) return false;
1739
Eric Christopher08637852010-09-30 22:34:19 +00001740 // Otherwise emit a libcall.
1741 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
Eric Christopher7bdc4de2010-10-11 08:31:54 +00001742 if (VT == MVT::i8)
Chad Rosier7ccb30b2012-02-03 21:07:27 +00001743 LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
Eric Christopher7bdc4de2010-10-11 08:31:54 +00001744 else if (VT == MVT::i16)
Chad Rosier7ccb30b2012-02-03 21:07:27 +00001745 LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
Eric Christopher08637852010-09-30 22:34:19 +00001746 else if (VT == MVT::i32)
Chad Rosier7ccb30b2012-02-03 21:07:27 +00001747 LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
Eric Christopher08637852010-09-30 22:34:19 +00001748 else if (VT == MVT::i64)
Chad Rosier7ccb30b2012-02-03 21:07:27 +00001749 LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
Eric Christopher08637852010-09-30 22:34:19 +00001750 else if (VT == MVT::i128)
Chad Rosier7ccb30b2012-02-03 21:07:27 +00001751 LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
Eric Christopher08637852010-09-30 22:34:19 +00001752 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
Eric Christopherdccd2c32010-10-11 08:38:55 +00001753
Eric Christopher08637852010-09-30 22:34:19 +00001754 return ARMEmitLibcall(I, LC);
1755}
1756
Chad Rosier769422f2012-02-03 21:23:45 +00001757bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
Duncan Sands1440e8b2010-11-03 11:35:31 +00001758 MVT VT;
Chris Lattnerdb125cf2011-07-18 04:54:35 +00001759 Type *Ty = I->getType();
Eric Christopher6a880d62010-10-11 08:37:26 +00001760 if (!isTypeLegal(Ty, VT))
1761 return false;
1762
1763 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1764 if (VT == MVT::i8)
Chad Rosier769422f2012-02-03 21:23:45 +00001765 LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
Eric Christopher6a880d62010-10-11 08:37:26 +00001766 else if (VT == MVT::i16)
Chad Rosier769422f2012-02-03 21:23:45 +00001767 LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
Eric Christopher6a880d62010-10-11 08:37:26 +00001768 else if (VT == MVT::i32)
Chad Rosier769422f2012-02-03 21:23:45 +00001769 LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
Eric Christopher6a880d62010-10-11 08:37:26 +00001770 else if (VT == MVT::i64)
Chad Rosier769422f2012-02-03 21:23:45 +00001771 LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
Eric Christopher6a880d62010-10-11 08:37:26 +00001772 else if (VT == MVT::i128)
Chad Rosier769422f2012-02-03 21:23:45 +00001773 LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
Eric Christophera1640d92010-10-11 08:40:05 +00001774 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");
Eric Christopher2896df82010-10-15 18:02:07 +00001775
Eric Christopher6a880d62010-10-11 08:37:26 +00001776 return ARMEmitLibcall(I, LC);
1777}
1778
Chad Rosier3901c3e2012-02-06 23:50:07 +00001779bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
Chad Rosier3901c3e2012-02-06 23:50:07 +00001780 EVT DestVT = TLI.getValueType(I->getType(), true);
1781
1782 // We can get here in the case when we have a binary operation on a non-legal
1783 // type and the target independent selector doesn't know how to handle it.
1784 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1785 return false;
Jush Luefc967e2012-06-14 06:08:19 +00001786
Chad Rosier6fde8752012-02-08 02:29:21 +00001787 unsigned Opc;
1788 switch (ISDOpcode) {
1789 default: return false;
1790 case ISD::ADD:
1791 Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
1792 break;
1793 case ISD::OR:
1794 Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
1795 break;
Chad Rosier743e1992012-02-08 02:45:44 +00001796 case ISD::SUB:
1797 Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
1798 break;
Chad Rosier6fde8752012-02-08 02:29:21 +00001799 }
1800
Chad Rosier3901c3e2012-02-06 23:50:07 +00001801 unsigned SrcReg1 = getRegForValue(I->getOperand(0));
1802 if (SrcReg1 == 0) return false;
1803
1804 // TODO: Often the 2nd operand is an immediate, which can be encoded directly
 1805 // in the instruction, rather than materializing the value in a register.
1806 unsigned SrcReg2 = getRegForValue(I->getOperand(1));
1807 if (SrcReg2 == 0) return false;
1808
JF Bastiena9a8a122013-05-29 15:45:47 +00001809 unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
Chad Rosier3901c3e2012-02-06 23:50:07 +00001810 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1811 TII.get(Opc), ResultReg)
1812 .addReg(SrcReg1).addReg(SrcReg2));
1813 UpdateValueMap(I, ResultReg);
1814 return true;
1815}
1816
1817bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
Chad Rosier316a5aa2012-12-17 19:59:43 +00001818 EVT FPVT = TLI.getValueType(I->getType(), true);
1819 if (!FPVT.isSimple()) return false;
1820 MVT VT = FPVT.getSimpleVT();
Eric Christopherac1a19e2010-09-09 01:06:51 +00001821
Eric Christopherbc39b822010-09-09 00:53:57 +00001822 // We can get here in the case when we want to use NEON for our fp
1823 // operations, but can't figure out how to. Just use the vfp instructions
1824 // if we have them.
1825 // FIXME: It'd be nice to use NEON instructions.
Chris Lattnerdb125cf2011-07-18 04:54:35 +00001826 Type *Ty = I->getType();
Eric Christopherbd6bf082010-09-09 01:02:03 +00001827 bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
1828 if (isFloat && !Subtarget->hasVFP2())
1829 return false;
Eric Christopherac1a19e2010-09-09 01:06:51 +00001830
Eric Christopherbc39b822010-09-09 00:53:57 +00001831 unsigned Opc;
Duncan Sandscdfad362010-11-03 12:17:33 +00001832 bool is64bit = VT == MVT::f64 || VT == MVT::i64;
Eric Christopherbc39b822010-09-09 00:53:57 +00001833 switch (ISDOpcode) {
1834 default: return false;
1835 case ISD::FADD:
Eric Christopherbd6bf082010-09-09 01:02:03 +00001836 Opc = is64bit ? ARM::VADDD : ARM::VADDS;
Eric Christopherbc39b822010-09-09 00:53:57 +00001837 break;
1838 case ISD::FSUB:
Eric Christopherbd6bf082010-09-09 01:02:03 +00001839 Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
Eric Christopherbc39b822010-09-09 00:53:57 +00001840 break;
1841 case ISD::FMUL:
Eric Christopherbd6bf082010-09-09 01:02:03 +00001842 Opc = is64bit ? ARM::VMULD : ARM::VMULS;
Eric Christopherbc39b822010-09-09 00:53:57 +00001843 break;
1844 }
Chad Rosier508a1f42011-11-16 18:39:44 +00001845 unsigned Op1 = getRegForValue(I->getOperand(0));
1846 if (Op1 == 0) return false;
1847
1848 unsigned Op2 = getRegForValue(I->getOperand(1));
1849 if (Op2 == 0) return false;
1850
Chad Rosier316a5aa2012-12-17 19:59:43 +00001851 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
Eric Christopherbc39b822010-09-09 00:53:57 +00001852 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1853 TII.get(Opc), ResultReg)
1854 .addReg(Op1).addReg(Op2));
Eric Christopherce07b542010-09-09 20:26:31 +00001855 UpdateValueMap(I, ResultReg);
Eric Christopherbc39b822010-09-09 00:53:57 +00001856 return true;
1857}
1858
Eric Christopherd10cd7b2010-09-10 23:18:12 +00001859// Call Handling Code
1860
Jush Luee649832012-07-19 09:49:00 +00001861// This is largely taken directly from CCAssignFnForNode
Eric Christopherd10cd7b2010-09-10 23:18:12 +00001862// TODO: We may not support all of this.
Jush Luee649832012-07-19 09:49:00 +00001863CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
1864 bool Return,
1865 bool isVarArg) {
Eric Christopherd10cd7b2010-09-10 23:18:12 +00001866 switch (CC) {
1867 default:
1868 llvm_unreachable("Unsupported calling convention");
Eric Christopherd10cd7b2010-09-10 23:18:12 +00001869 case CallingConv::Fast:
Jush Lu2ff4e9d2012-08-16 05:15:53 +00001870 if (Subtarget->hasVFP2() && !isVarArg) {
1871 if (!Subtarget->isAAPCS_ABI())
1872 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1873 // For AAPCS ABI targets, just use VFP variant of the calling convention.
1874 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1875 }
Evan Cheng1f8b40d2010-10-22 18:57:05 +00001876 // Fallthrough
1877 case CallingConv::C:
Eric Christopherd10cd7b2010-09-10 23:18:12 +00001878 // Use target triple & subtarget features to do actual dispatch.
1879 if (Subtarget->isAAPCS_ABI()) {
1880 if (Subtarget->hasVFP2() &&
Jush Luee649832012-07-19 09:49:00 +00001881 TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
Eric Christopherd10cd7b2010-09-10 23:18:12 +00001882 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
1883 else
1884 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
1885 } else
1886 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
1887 case CallingConv::ARM_AAPCS_VFP:
Jush Luee649832012-07-19 09:49:00 +00001888 if (!isVarArg)
1889 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
 1890 // Fall through to the soft float variant; variadic functions don't
 1891 // use the hard floating point ABI.
Eric Christopherd10cd7b2010-09-10 23:18:12 +00001892 case CallingConv::ARM_AAPCS:
1893 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
1894 case CallingConv::ARM_APCS:
1895 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
Eric Christophere94ac882012-08-03 00:05:53 +00001896 case CallingConv::GHC:
1897 if (Return)
1898 llvm_unreachable("Can't return in GHC call convention");
1899 else
1900 return CC_ARM_APCS_GHC;
Eric Christopherd10cd7b2010-09-10 23:18:12 +00001901 }
1902}
1903
Eric Christophera9a7a1a2010-09-29 23:11:09 +00001904bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
1905 SmallVectorImpl<unsigned> &ArgRegs,
Duncan Sands1440e8b2010-11-03 11:35:31 +00001906 SmallVectorImpl<MVT> &ArgVTs,
Eric Christophera9a7a1a2010-09-29 23:11:09 +00001907 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
1908 SmallVectorImpl<unsigned> &RegArgs,
1909 CallingConv::ID CC,
Jush Luee649832012-07-19 09:49:00 +00001910 unsigned &NumBytes,
1911 bool isVarArg) {
Eric Christophera9a7a1a2010-09-29 23:11:09 +00001912 SmallVector<CCValAssign, 16> ArgLocs;
Jush Luee649832012-07-19 09:49:00 +00001913 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, *Context);
1914 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
1915 CCAssignFnForCall(CC, false, isVarArg));
Eric Christophera9a7a1a2010-09-29 23:11:09 +00001916
Bill Wendling5aeff312012-03-16 23:11:07 +00001917 // Check that we can handle all of the arguments. If we can't, then bail out
1918 // now before we add code to the MBB.
1919 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1920 CCValAssign &VA = ArgLocs[i];
1921 MVT ArgVT = ArgVTs[VA.getValNo()];
1922
1923 // We don't handle NEON/vector parameters yet.
1924 if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
1925 return false;
1926
1927 // Now copy/store arg to correct locations.
1928 if (VA.isRegLoc() && !VA.needsCustom()) {
1929 continue;
1930 } else if (VA.needsCustom()) {
1931 // TODO: We need custom lowering for vector (v2f64) args.
1932 if (VA.getLocVT() != MVT::f64 ||
1933 // TODO: Only handle register args for now.
1934 !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
1935 return false;
1936 } else {
1937 switch (static_cast<EVT>(ArgVT).getSimpleVT().SimpleTy) {
1938 default:
1939 return false;
1940 case MVT::i1:
1941 case MVT::i8:
1942 case MVT::i16:
1943 case MVT::i32:
1944 break;
1945 case MVT::f32:
1946 if (!Subtarget->hasVFP2())
1947 return false;
1948 break;
1949 case MVT::f64:
1950 if (!Subtarget->hasVFP2())
1951 return false;
1952 break;
1953 }
1954 }
1955 }
1956
 1957 // At this point, we are able to handle the call's arguments in fast isel.
1958
Eric Christophera9a7a1a2010-09-29 23:11:09 +00001959 // Get a count of how many bytes are to be pushed on the stack.
1960 NumBytes = CCInfo.getNextStackOffset();
1961
1962 // Issue CALLSEQ_START
Evan Chengd5b03f22011-06-28 21:14:33 +00001963 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
Eric Christopherfb0b8922010-10-11 21:20:02 +00001964 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1965 TII.get(AdjStackDown))
1966 .addImm(NumBytes));
Eric Christophera9a7a1a2010-09-29 23:11:09 +00001967
1968 // Process the args.
1969 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1970 CCValAssign &VA = ArgLocs[i];
1971 unsigned Arg = ArgRegs[VA.getValNo()];
Duncan Sands1440e8b2010-11-03 11:35:31 +00001972 MVT ArgVT = ArgVTs[VA.getValNo()];
Eric Christophera9a7a1a2010-09-29 23:11:09 +00001973
Bill Wendling5aeff312012-03-16 23:11:07 +00001974 assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
1975 "We don't handle NEON/vector parameters yet.");
Eric Christophera4633f52010-10-23 09:37:17 +00001976
Eric Christopherf9764fa2010-09-30 20:49:44 +00001977 // Handle arg promotion, etc.
Eric Christophera9a7a1a2010-09-29 23:11:09 +00001978 switch (VA.getLocInfo()) {
1979 case CCValAssign::Full: break;
Eric Christopherfa87d662010-10-18 02:17:53 +00001980 case CCValAssign::SExt: {
Chad Rosierb74c8652011-12-02 20:25:18 +00001981 MVT DestVT = VA.getLocVT();
Chad Rosier5793a652012-02-14 22:29:48 +00001982 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
1983 assert (Arg != 0 && "Failed to emit a sext");
Chad Rosierb74c8652011-12-02 20:25:18 +00001984 ArgVT = DestVT;
Eric Christopherfa87d662010-10-18 02:17:53 +00001985 break;
1986 }
Chad Rosier42536af2011-11-05 20:16:15 +00001987 case CCValAssign::AExt:
1988 // Intentional fall-through. Handle AExt and ZExt.
Eric Christopherfa87d662010-10-18 02:17:53 +00001989 case CCValAssign::ZExt: {
Chad Rosierb74c8652011-12-02 20:25:18 +00001990 MVT DestVT = VA.getLocVT();
Chad Rosier5793a652012-02-14 22:29:48 +00001991 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
JF Bastien8fc760c2013-06-07 20:10:37 +00001992 assert (Arg != 0 && "Failed to emit a zext");
Chad Rosierb74c8652011-12-02 20:25:18 +00001993 ArgVT = DestVT;
Eric Christopherfa87d662010-10-18 02:17:53 +00001994 break;
1995 }
1996 case CCValAssign::BCvt: {
Wesley Peckbf17cfa2010-11-23 03:31:01 +00001997 unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
Duncan Sands1440e8b2010-11-03 11:35:31 +00001998 /*TODO: Kill=*/false);
Eric Christopherfa87d662010-10-18 02:17:53 +00001999 assert(BC != 0 && "Failed to emit a bitcast!");
2000 Arg = BC;
2001 ArgVT = VA.getLocVT();
2002 break;
2003 }
2004 default: llvm_unreachable("Unknown arg promotion!");
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002005 }
2006
2007 // Now copy/store arg to correct locations.
Eric Christopherfb0b8922010-10-11 21:20:02 +00002008 if (VA.isRegLoc() && !VA.needsCustom()) {
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002009 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
Eric Christopherf9764fa2010-09-30 20:49:44 +00002010 VA.getLocReg())
Chad Rosier42536af2011-11-05 20:16:15 +00002011 .addReg(Arg);
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002012 RegArgs.push_back(VA.getLocReg());
Eric Christopher2d8f6fe2010-10-21 00:01:47 +00002013 } else if (VA.needsCustom()) {
2014 // TODO: We need custom lowering for vector (v2f64) args.
Bill Wendling5aeff312012-03-16 23:11:07 +00002015 assert(VA.getLocVT() == MVT::f64 &&
2016 "Custom lowering for v2f64 args not available");
Jim Grosbach6b156392010-10-27 21:39:08 +00002017
Eric Christopher2d8f6fe2010-10-21 00:01:47 +00002018 CCValAssign &NextVA = ArgLocs[++i];
2019
Bill Wendling5aeff312012-03-16 23:11:07 +00002020 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2021 "We only handle register args!");
Eric Christopher2d8f6fe2010-10-21 00:01:47 +00002022
2023 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2024 TII.get(ARM::VMOVRRD), VA.getLocReg())
2025 .addReg(NextVA.getLocReg(), RegState::Define)
2026 .addReg(Arg));
2027 RegArgs.push_back(VA.getLocReg());
2028 RegArgs.push_back(NextVA.getLocReg());
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002029 } else {
Eric Christopher5b924802010-10-21 20:09:54 +00002030 assert(VA.isMemLoc());
2031 // Need to store on the stack.
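      // e.g. with AAPCS the fifth i32 argument goes to [sp, #0] once r0-r3
      // are taken; VA.getLocMemOffset() is that SP-relative slot.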
Eric Christopher0d581222010-11-19 22:30:02 +00002032 Address Addr;
2033 Addr.BaseType = Address::RegBase;
2034 Addr.Base.Reg = ARM::SP;
2035 Addr.Offset = VA.getLocMemOffset();
Eric Christopher5b924802010-10-21 20:09:54 +00002036
Bill Wendling5aeff312012-03-16 23:11:07 +00002037 bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
2038 assert(EmitRet && "Could not emit a store for argument!");
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002039 }
2040 }
Bill Wendling5aeff312012-03-16 23:11:07 +00002041
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002042 return true;
2043}
2044
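// Finish off a call: tear down the call frame (CALLSEQ_END) and, if the call
// produces a value, copy it out of its physical register(s), record those
// registers in UsedRegs, and update the value map for I.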
Duncan Sands1440e8b2010-11-03 11:35:31 +00002045bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002046 const Instruction *I, CallingConv::ID CC,
Jush Luee649832012-07-19 09:49:00 +00002047 unsigned &NumBytes, bool isVarArg) {
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002048 // Issue CALLSEQ_END
Evan Chengd5b03f22011-06-28 21:14:33 +00002049 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
Eric Christopherfb0b8922010-10-11 21:20:02 +00002050 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2051 TII.get(AdjStackUp))
2052 .addImm(NumBytes).addImm(0));
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002053
2054 // Now the return value.
Duncan Sands1440e8b2010-11-03 11:35:31 +00002055 if (RetVT != MVT::isVoid) {
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002056 SmallVector<CCValAssign, 16> RVLocs;
Jush Luee649832012-07-19 09:49:00 +00002057 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
2058 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002059
2060 // Copy all of the result registers out of their specified physreg.
Duncan Sands1440e8b2010-11-03 11:35:31 +00002061 if (RVLocs.size() == 2 && RetVT == MVT::f64) {
Eric Christopher14df8822010-10-01 00:00:11 +00002062 // For this move we copy into two registers and then move into the
2063 // double fp reg we want.
Patrik Hagglunda61b17c2012-12-13 06:34:11 +00002064 MVT DestVT = RVLocs[0].getValVT();
Craig Topper44d23822012-02-22 05:59:10 +00002065 const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
Eric Christopher14df8822010-10-01 00:00:11 +00002066 unsigned ResultReg = createResultReg(DstRC);
2067 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2068 TII.get(ARM::VMOVDRR), ResultReg)
Eric Christopher3659ac22010-10-20 08:02:24 +00002069 .addReg(RVLocs[0].getLocReg())
2070 .addReg(RVLocs[1].getLocReg()));
Eric Christopherdccd2c32010-10-11 08:38:55 +00002071
Eric Christopher3659ac22010-10-20 08:02:24 +00002072 UsedRegs.push_back(RVLocs[0].getLocReg());
2073 UsedRegs.push_back(RVLocs[1].getLocReg());
Jim Grosbach6b156392010-10-27 21:39:08 +00002074
Eric Christopherdccd2c32010-10-11 08:38:55 +00002075 // Finally update the result.
Eric Christopher14df8822010-10-01 00:00:11 +00002076 UpdateValueMap(I, ResultReg);
Chad Rosier2a2e9d52012-05-11 18:51:55 +00002077 } else {
2078      assert(RVLocs.size() == 1 && "Can't handle non-double multi-reg retvals!");
Patrik Hagglunda61b17c2012-12-13 06:34:11 +00002079 MVT CopyVT = RVLocs[0].getValVT();
Chad Rosier0eff39f2011-11-08 00:03:32 +00002080
2081 // Special handling for extended integers.
2082 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
2083 CopyVT = MVT::i32;
2084
Craig Topper44d23822012-02-22 05:59:10 +00002085 const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002086
Eric Christopher14df8822010-10-01 00:00:11 +00002087 unsigned ResultReg = createResultReg(DstRC);
2088 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
2089 ResultReg).addReg(RVLocs[0].getLocReg());
2090 UsedRegs.push_back(RVLocs[0].getLocReg());
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002091
Eric Christopherdccd2c32010-10-11 08:38:55 +00002092 // Finally update the result.
Eric Christopher14df8822010-10-01 00:00:11 +00002093 UpdateValueMap(I, ResultReg);
2094 }
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002095 }
2096
Eric Christopherdccd2c32010-10-11 08:38:55 +00002097 return true;
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002098}
2099
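// Lower a return. Only a single register return value is handled; narrow
// integer results are extended to i32 when the ABI flags require it, and the
// return registers are added as implicit uses of the BX_RET / tBX_RET.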
Eric Christopher4f512ef2010-10-22 01:28:00 +00002100bool ARMFastISel::SelectRet(const Instruction *I) {
2101 const ReturnInst *Ret = cast<ReturnInst>(I);
2102 const Function &F = *I->getParent()->getParent();
Jim Grosbach6b156392010-10-27 21:39:08 +00002103
Eric Christopher4f512ef2010-10-22 01:28:00 +00002104 if (!FuncInfo.CanLowerReturn)
2105 return false;
Jim Grosbach6b156392010-10-27 21:39:08 +00002106
Jakob Stoklund Olesenfc743272013-02-05 18:08:40 +00002107 // Build a list of return value registers.
2108 SmallVector<unsigned, 4> RetRegs;
2109
Eric Christopher4f512ef2010-10-22 01:28:00 +00002110 CallingConv::ID CC = F.getCallingConv();
2111 if (Ret->getNumOperands() > 0) {
2112 SmallVector<ISD::OutputArg, 4> Outs;
Bill Wendling8b62abd2012-12-30 13:01:51 +00002113 GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
Eric Christopher4f512ef2010-10-22 01:28:00 +00002114
2115 // Analyze operands of the call, assigning locations to each operand.
2116 SmallVector<CCValAssign, 16> ValLocs;
Jim Grosbachb04546f2011-09-13 20:30:37 +00002117 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,I->getContext());
Jush Luee649832012-07-19 09:49:00 +00002118 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
2119 F.isVarArg()));
Eric Christopher4f512ef2010-10-22 01:28:00 +00002120
2121 const Value *RV = Ret->getOperand(0);
2122 unsigned Reg = getRegForValue(RV);
2123 if (Reg == 0)
2124 return false;
2125
2126 // Only handle a single return value for now.
2127 if (ValLocs.size() != 1)
2128 return false;
2129
2130 CCValAssign &VA = ValLocs[0];
Jim Grosbach6b156392010-10-27 21:39:08 +00002131
Eric Christopher4f512ef2010-10-22 01:28:00 +00002132 // Don't bother handling odd stuff for now.
2133 if (VA.getLocInfo() != CCValAssign::Full)
2134 return false;
2135 // Only handle register returns for now.
2136 if (!VA.isRegLoc())
2137 return false;
Chad Rosierf470cbb2011-11-04 00:50:21 +00002138
2139 unsigned SrcReg = Reg + VA.getValNo();
Chad Rosier316a5aa2012-12-17 19:59:43 +00002140 EVT RVEVT = TLI.getValueType(RV->getType());
2141 if (!RVEVT.isSimple()) return false;
2142 MVT RVVT = RVEVT.getSimpleVT();
Patrik Hagglunda61b17c2012-12-13 06:34:11 +00002143 MVT DestVT = VA.getValVT();
Chad Rosierf470cbb2011-11-04 00:50:21 +00002144 // Special handling for extended integers.
2145 if (RVVT != DestVT) {
2146 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
2147 return false;
2148
Chad Rosierf470cbb2011-11-04 00:50:21 +00002149 assert(DestVT == MVT::i32 && "ARM should always ext to i32");
2150
Chad Rosierb8703fe2012-02-17 01:21:28 +00002151 // Perform extension if flagged as either zext or sext. Otherwise, do
2152 // nothing.
2153 if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
2154 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
2155 if (SrcReg == 0) return false;
2156 }
Chad Rosierf470cbb2011-11-04 00:50:21 +00002157 }
Jim Grosbach6b156392010-10-27 21:39:08 +00002158
Eric Christopher4f512ef2010-10-22 01:28:00 +00002159 // Make the copy.
Eric Christopher4f512ef2010-10-22 01:28:00 +00002160 unsigned DstReg = VA.getLocReg();
2161 const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
2162 // Avoid a cross-class copy. This is very unlikely.
2163 if (!SrcRC->contains(DstReg))
2164 return false;
2165 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
2166 DstReg).addReg(SrcReg);
2167
Jakob Stoklund Olesenfc743272013-02-05 18:08:40 +00002168 // Add register to return instruction.
2169 RetRegs.push_back(VA.getLocReg());
Eric Christopher4f512ef2010-10-22 01:28:00 +00002170 }
Jim Grosbach6b156392010-10-27 21:39:08 +00002171
Chad Rosier66dc8ca2011-11-08 21:12:00 +00002172 unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
Jakob Stoklund Olesenfc743272013-02-05 18:08:40 +00002173 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2174 TII.get(RetOpc));
2175 AddOptionalDefs(MIB);
2176 for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
2177 MIB.addReg(RetRegs[i], RegState::Implicit);
Eric Christopher4f512ef2010-10-22 01:28:00 +00002178 return true;
2179}
2180
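// Pick the call opcode: calls through a register use BLX / tBLXr, direct
// calls to a symbol use BL / tBL.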
Chad Rosier49d6fc02012-06-12 19:25:13 +00002181unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
2182 if (UseReg)
2183 return isThumb2 ? ARM::tBLXr : ARM::BLX;
2184 else
2185 return isThumb2 ? ARM::tBL : ARM::BL;
2186}
2187
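// Materialize the address of a named libcall by wrapping the name in an
// external global and loading its address like any other GlobalValue.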
2188unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
2189 GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
2190 GlobalValue::ExternalLinkage, 0, Name);
Chad Rosier316a5aa2012-12-17 19:59:43 +00002191 EVT LCREVT = TLI.getValueType(GV->getType());
2192 if (!LCREVT.isSimple()) return 0;
2193 return ARMMaterializeGV(GV, LCREVT.getSimpleVT());
Eric Christopher872f4a22011-02-22 01:37:10 +00002194}
2195
Eric Christopherbb3e5da2010-09-14 23:03:37 +00002196// A quick function that will emit a call for a named libcall with the
2197// vector of arguments passed to the Instruction I. We can assume that we
Eric Christopherdccd2c32010-10-11 08:38:55 +00002198// can emit a call for any libcall we can produce. This is an abridged version
2199// of the full call infrastructure since we won't need to worry about things
Eric Christopherbb3e5da2010-09-14 23:03:37 +00002200// like computed function pointers or strange arguments at call sites.
2201// TODO: Try to unify this and the normal call bits for ARM, then try to unify
2202// with X86.
Eric Christopher7ed8ec92010-09-28 01:21:42 +00002203bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
2204 CallingConv::ID CC = TLI.getLibcallCallingConv(Call);
Eric Christopherdccd2c32010-10-11 08:38:55 +00002205
Eric Christopherbb3e5da2010-09-14 23:03:37 +00002206 // Handle *simple* calls for now.
Chris Lattnerdb125cf2011-07-18 04:54:35 +00002207 Type *RetTy = I->getType();
Duncan Sands1440e8b2010-11-03 11:35:31 +00002208 MVT RetVT;
Eric Christopherbb3e5da2010-09-14 23:03:37 +00002209 if (RetTy->isVoidTy())
2210 RetVT = MVT::isVoid;
2211 else if (!isTypeLegal(RetTy, RetVT))
2212 return false;
Eric Christopherdccd2c32010-10-11 08:38:55 +00002213
Chad Rosier2a2e9d52012-05-11 18:51:55 +00002214 // Can't handle non-double multi-reg retvals.
Jush Luefc967e2012-06-14 06:08:19 +00002215 if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
Chad Rosier2a2e9d52012-05-11 18:51:55 +00002216 SmallVector<CCValAssign, 16> RVLocs;
2217 CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
Jush Luee649832012-07-19 09:49:00 +00002218 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
Chad Rosier2a2e9d52012-05-11 18:51:55 +00002219 if (RVLocs.size() >= 2 && RetVT != MVT::f64)
2220 return false;
2221 }
2222
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002223 // Set up the argument vectors.
Eric Christopherbb3e5da2010-09-14 23:03:37 +00002224 SmallVector<Value*, 8> Args;
2225 SmallVector<unsigned, 8> ArgRegs;
Duncan Sands1440e8b2010-11-03 11:35:31 +00002226 SmallVector<MVT, 8> ArgVTs;
Eric Christopherbb3e5da2010-09-14 23:03:37 +00002227 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
2228 Args.reserve(I->getNumOperands());
2229 ArgRegs.reserve(I->getNumOperands());
2230 ArgVTs.reserve(I->getNumOperands());
2231 ArgFlags.reserve(I->getNumOperands());
Eric Christopher7ed8ec92010-09-28 01:21:42 +00002232 for (unsigned i = 0; i < I->getNumOperands(); ++i) {
Eric Christopherbb3e5da2010-09-14 23:03:37 +00002233 Value *Op = I->getOperand(i);
2234 unsigned Arg = getRegForValue(Op);
2235 if (Arg == 0) return false;
Eric Christopherdccd2c32010-10-11 08:38:55 +00002236
Chris Lattnerdb125cf2011-07-18 04:54:35 +00002237 Type *ArgTy = Op->getType();
Duncan Sands1440e8b2010-11-03 11:35:31 +00002238 MVT ArgVT;
Eric Christopherbb3e5da2010-09-14 23:03:37 +00002239 if (!isTypeLegal(ArgTy, ArgVT)) return false;
Eric Christopherdccd2c32010-10-11 08:38:55 +00002240
Eric Christopherbb3e5da2010-09-14 23:03:37 +00002241 ISD::ArgFlagsTy Flags;
2242 unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
2243 Flags.setOrigAlign(OriginalAlignment);
Eric Christopherdccd2c32010-10-11 08:38:55 +00002244
Eric Christopherbb3e5da2010-09-14 23:03:37 +00002245 Args.push_back(Op);
2246 ArgRegs.push_back(Arg);
2247 ArgVTs.push_back(ArgVT);
2248 ArgFlags.push_back(Flags);
2249 }
Eric Christopherdccd2c32010-10-11 08:38:55 +00002250
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002251 // Handle the arguments now that we've gotten them.
Eric Christopherbb3e5da2010-09-14 23:03:37 +00002252 SmallVector<unsigned, 4> RegArgs;
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002253 unsigned NumBytes;
Jush Luee649832012-07-19 09:49:00 +00002254 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2255 RegArgs, CC, NumBytes, false))
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002256 return false;
Eric Christopherdccd2c32010-10-11 08:38:55 +00002257
Chad Rosier49d6fc02012-06-12 19:25:13 +00002258 unsigned CalleeReg = 0;
2259 if (EnableARMLongCalls) {
2260 CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
2261 if (CalleeReg == 0) return false;
2262 }
Eric Christopherdccd2c32010-10-11 08:38:55 +00002263
Chad Rosier49d6fc02012-06-12 19:25:13 +00002264 // Issue the call.
2265 unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls);
2266 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2267 DL, TII.get(CallOpc));
Jakob Stoklund Olesen0745b642012-08-24 20:52:46 +00002268 // BL / BLX don't take a predicate, but tBL / tBLX do.
2269 if (isThumb2)
Chad Rosier49d6fc02012-06-12 19:25:13 +00002270 AddDefaultPred(MIB);
Jakob Stoklund Olesen0745b642012-08-24 20:52:46 +00002271 if (EnableARMLongCalls)
2272 MIB.addReg(CalleeReg);
2273 else
2274 MIB.addExternalSymbol(TLI.getLibcallName(Call));
Chad Rosier49d6fc02012-06-12 19:25:13 +00002275
Eric Christopherbb3e5da2010-09-14 23:03:37 +00002276 // Add implicit physical register uses to the call.
2277 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
Jakob Stoklund Olesen0745b642012-08-24 20:52:46 +00002278 MIB.addReg(RegArgs[i], RegState::Implicit);
Eric Christopherdccd2c32010-10-11 08:38:55 +00002279
Jakob Stoklund Olesenc54f6342012-02-24 01:19:29 +00002280 // Add a register mask with the call-preserved registers.
2281 // Proper defs for return values will be added by setPhysRegsDeadExcept().
2282 MIB.addRegMask(TRI.getCallPreservedMask(CC));
2283
Eric Christophera9a7a1a2010-09-29 23:11:09 +00002284 // Finish off the call including any return values.
Eric Christopherdccd2c32010-10-11 08:38:55 +00002285 SmallVector<unsigned, 4> UsedRegs;
Jush Luee649832012-07-19 09:49:00 +00002286 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;
Eric Christopherdccd2c32010-10-11 08:38:55 +00002287
Eric Christopherbb3e5da2010-09-14 23:03:37 +00002288 // Set all unused physreg defs as dead.
2289 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
Eric Christopherdccd2c32010-10-11 08:38:55 +00002290
Eric Christopherbb3e5da2010-09-14 23:03:37 +00002291 return true;
2292}
2293
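// Lower a call. When IntrMemName is non-null we are lowering a memory
// intrinsic (memcpy/memmove/memset) as a call to that named library routine
// instead of to the instruction's own callee.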
Chad Rosier11add262011-11-11 23:31:03 +00002294bool ARMFastISel::SelectCall(const Instruction *I,
2295 const char *IntrMemName = 0) {
Eric Christopherf9764fa2010-09-30 20:49:44 +00002296 const CallInst *CI = cast<CallInst>(I);
2297 const Value *Callee = CI->getCalledValue();
2298
Chad Rosier11add262011-11-11 23:31:03 +00002299 // Can't handle inline asm.
2300 if (isa<InlineAsm>(Callee)) return false;
Eric Christopherf9764fa2010-09-30 20:49:44 +00002301
Chad Rosier425e9512012-12-11 00:18:02 +00002302 // Allow SelectionDAG isel to handle tail calls.
2303 if (CI->isTailCall()) return false;
2304
Eric Christopherf9764fa2010-09-30 20:49:44 +00002305 // Check the calling convention.
2306 ImmutableCallSite CS(CI);
2307 CallingConv::ID CC = CS.getCallingConv();
Eric Christopher4cf34c62010-10-18 06:49:12 +00002308
Eric Christopherf9764fa2010-09-30 20:49:44 +00002309 // TODO: Avoid some calling conventions?
Eric Christopherdccd2c32010-10-11 08:38:55 +00002310
Chris Lattnerdb125cf2011-07-18 04:54:35 +00002311 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
2312 FunctionType *FTy = cast<FunctionType>(PT->getElementType());
Jush Luee649832012-07-19 09:49:00 +00002313 bool isVarArg = FTy->isVarArg();
Eric Christopherdccd2c32010-10-11 08:38:55 +00002314
Eric Christopherf9764fa2010-09-30 20:49:44 +00002315 // Handle *simple* calls for now.
Chris Lattnerdb125cf2011-07-18 04:54:35 +00002316 Type *RetTy = I->getType();
Duncan Sands1440e8b2010-11-03 11:35:31 +00002317 MVT RetVT;
Eric Christopherf9764fa2010-09-30 20:49:44 +00002318 if (RetTy->isVoidTy())
2319 RetVT = MVT::isVoid;
Chad Rosier0eff39f2011-11-08 00:03:32 +00002320 else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
2321 RetVT != MVT::i8 && RetVT != MVT::i1)
Eric Christopherf9764fa2010-09-30 20:49:44 +00002322 return false;
Eric Christopherdccd2c32010-10-11 08:38:55 +00002323
Chad Rosier2a2e9d52012-05-11 18:51:55 +00002324 // Can't handle non-double multi-reg retvals.
2325 if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
2326 RetVT != MVT::i16 && RetVT != MVT::i32) {
2327 SmallVector<CCValAssign, 16> RVLocs;
Jush Luee649832012-07-19 09:49:00 +00002328 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
2329 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
Chad Rosier2a2e9d52012-05-11 18:51:55 +00002330 if (RVLocs.size() >= 2 && RetVT != MVT::f64)
2331 return false;
2332 }
2333
Eric Christopherf9764fa2010-09-30 20:49:44 +00002334 // Set up the argument vectors.
2335 SmallVector<Value*, 8> Args;
2336 SmallVector<unsigned, 8> ArgRegs;
Duncan Sands1440e8b2010-11-03 11:35:31 +00002337 SmallVector<MVT, 8> ArgVTs;
Eric Christopherf9764fa2010-09-30 20:49:44 +00002338 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
Chad Rosier92fd0172012-02-15 00:23:55 +00002339 unsigned arg_size = CS.arg_size();
2340 Args.reserve(arg_size);
2341 ArgRegs.reserve(arg_size);
2342 ArgVTs.reserve(arg_size);
2343 ArgFlags.reserve(arg_size);
Eric Christopherf9764fa2010-09-30 20:49:44 +00002344 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
2345 i != e; ++i) {
Chad Rosier11add262011-11-11 23:31:03 +00002346 // If we're lowering a memory intrinsic instead of a regular call, skip the
2347 // last two arguments, which shouldn't be passed to the underlying function.
2348 if (IntrMemName && e-i <= 2)
2349 break;
Eric Christopherdccd2c32010-10-11 08:38:55 +00002350
Eric Christopherf9764fa2010-09-30 20:49:44 +00002351 ISD::ArgFlagsTy Flags;
2352 unsigned AttrInd = i - CS.arg_begin() + 1;
Bill Wendling034b94b2012-12-19 07:18:57 +00002353 if (CS.paramHasAttr(AttrInd, Attribute::SExt))
Eric Christopherf9764fa2010-09-30 20:49:44 +00002354 Flags.setSExt();
Bill Wendling034b94b2012-12-19 07:18:57 +00002355 if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
Eric Christopherf9764fa2010-09-30 20:49:44 +00002356 Flags.setZExt();
2357
Chad Rosier8e4a2e42011-11-04 00:58:10 +00002358 // FIXME: Only handle *easy* calls for now.
Bill Wendling034b94b2012-12-19 07:18:57 +00002359 if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
2360 CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
2361 CS.paramHasAttr(AttrInd, Attribute::Nest) ||
2362 CS.paramHasAttr(AttrInd, Attribute::ByVal))
Eric Christopherf9764fa2010-09-30 20:49:44 +00002363 return false;
2364
Chris Lattnerdb125cf2011-07-18 04:54:35 +00002365 Type *ArgTy = (*i)->getType();
Duncan Sands1440e8b2010-11-03 11:35:31 +00002366 MVT ArgVT;
Chad Rosier42536af2011-11-05 20:16:15 +00002367 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
2368 ArgVT != MVT::i1)
Eric Christopherf9764fa2010-09-30 20:49:44 +00002369 return false;
Chad Rosier424fe0e2011-11-18 01:17:34 +00002370
2371 unsigned Arg = getRegForValue(*i);
2372 if (Arg == 0)
2373 return false;
2374
Eric Christopherf9764fa2010-09-30 20:49:44 +00002375 unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
2376 Flags.setOrigAlign(OriginalAlignment);
Eric Christopherdccd2c32010-10-11 08:38:55 +00002377
Eric Christopherf9764fa2010-09-30 20:49:44 +00002378 Args.push_back(*i);
2379 ArgRegs.push_back(Arg);
2380 ArgVTs.push_back(ArgVT);
2381 ArgFlags.push_back(Flags);
2382 }
Eric Christopherdccd2c32010-10-11 08:38:55 +00002383
Eric Christopherf9764fa2010-09-30 20:49:44 +00002384 // Handle the arguments now that we've gotten them.
2385 SmallVector<unsigned, 4> RegArgs;
2386 unsigned NumBytes;
Jush Luee649832012-07-19 09:49:00 +00002387 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2388 RegArgs, CC, NumBytes, isVarArg))
Eric Christopherf9764fa2010-09-30 20:49:44 +00002389 return false;
Eric Christopherdccd2c32010-10-11 08:38:55 +00002390
Chad Rosier49d6fc02012-06-12 19:25:13 +00002391 bool UseReg = false;
Chad Rosier1c8fccb2012-05-23 18:38:57 +00002392 const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
Chad Rosier49d6fc02012-06-12 19:25:13 +00002393 if (!GV || EnableARMLongCalls) UseReg = true;
Chad Rosier1c8fccb2012-05-23 18:38:57 +00002394
Chad Rosier49d6fc02012-06-12 19:25:13 +00002395 unsigned CalleeReg = 0;
2396 if (UseReg) {
2397 if (IntrMemName)
2398 CalleeReg = getLibcallReg(IntrMemName);
2399 else
2400 CalleeReg = getRegForValue(Callee);
2401
Chad Rosier1c8fccb2012-05-23 18:38:57 +00002402 if (CalleeReg == 0) return false;
2403 }
2404
Chad Rosier49d6fc02012-06-12 19:25:13 +00002405 // Issue the call.
2406 unsigned CallOpc = ARMSelectCallOp(UseReg);
2407 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2408 DL, TII.get(CallOpc));
Chad Rosier49d6fc02012-06-12 19:25:13 +00002409
Jakob Stoklund Olesen0745b642012-08-24 20:52:46 +00002410 // ARM calls don't take a predicate, but tBL / tBLX do.
2411  if (isThumb2)
Chad Rosier49d6fc02012-06-12 19:25:13 +00002412 AddDefaultPred(MIB);
Jakob Stoklund Olesen0745b642012-08-24 20:52:46 +00002413 if (UseReg)
2414 MIB.addReg(CalleeReg);
2415 else if (!IntrMemName)
2416 MIB.addGlobalAddress(GV, 0, 0);
2417 else
2418 MIB.addExternalSymbol(IntrMemName, 0);
Jush Luefc967e2012-06-14 06:08:19 +00002419
Eric Christopherf9764fa2010-09-30 20:49:44 +00002420 // Add implicit physical register uses to the call.
2421 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
Jakob Stoklund Olesen0745b642012-08-24 20:52:46 +00002422 MIB.addReg(RegArgs[i], RegState::Implicit);
Eric Christopherdccd2c32010-10-11 08:38:55 +00002423
Jakob Stoklund Olesenc54f6342012-02-24 01:19:29 +00002424 // Add a register mask with the call-preserved registers.
2425 // Proper defs for return values will be added by setPhysRegsDeadExcept().
2426 MIB.addRegMask(TRI.getCallPreservedMask(CC));
2427
Eric Christopherf9764fa2010-09-30 20:49:44 +00002428 // Finish off the call including any return values.
Eric Christopherdccd2c32010-10-11 08:38:55 +00002429 SmallVector<unsigned, 4> UsedRegs;
Jush Luee649832012-07-19 09:49:00 +00002430 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
2431 return false;
Eric Christopherdccd2c32010-10-11 08:38:55 +00002432
Eric Christopherf9764fa2010-09-30 20:49:44 +00002433 // Set all unused physreg defs as dead.
2434 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
Eric Christopherdccd2c32010-10-11 08:38:55 +00002435
Eric Christopherf9764fa2010-09-30 20:49:44 +00002436 return true;
Eric Christopherf9764fa2010-09-30 20:49:44 +00002437}
2438
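// A memcpy is considered small enough to expand inline if it copies at most
// 16 bytes.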
Chad Rosier2c42b8c2011-11-14 23:04:09 +00002439bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
Chad Rosier909cb4f2011-11-14 22:46:17 +00002440 return Len <= 16;
2441}
2442
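// Expand a small memcpy inline as a sequence of load/store pairs, choosing
// i32/i16/i8 chunks based on the remaining length and the alignment.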
Jim Grosbachd4f020a2012-04-06 23:43:50 +00002443bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
Chad Rosierc9758b12012-12-06 01:34:31 +00002444 uint64_t Len, unsigned Alignment) {
Chad Rosier909cb4f2011-11-14 22:46:17 +00002445  // Make sure we don't bloat code by inlining very large memcpys.
Chad Rosier2c42b8c2011-11-14 23:04:09 +00002446 if (!ARMIsMemCpySmall(Len))
Chad Rosier909cb4f2011-11-14 22:46:17 +00002447 return false;
2448
Chad Rosier909cb4f2011-11-14 22:46:17 +00002449 while (Len) {
2450 MVT VT;
Chad Rosierc9758b12012-12-06 01:34:31 +00002451 if (!Alignment || Alignment >= 4) {
2452 if (Len >= 4)
2453 VT = MVT::i32;
2454 else if (Len >= 2)
2455 VT = MVT::i16;
2456 else {
2457        assert(Len == 1 && "Expected a length of 1!");
2458 VT = MVT::i8;
2459 }
2460 } else {
2461 // Bound based on alignment.
2462 if (Len >= 2 && Alignment == 2)
2463 VT = MVT::i16;
2464 else {
Chad Rosierc9758b12012-12-06 01:34:31 +00002465 VT = MVT::i8;
2466 }
Chad Rosier909cb4f2011-11-14 22:46:17 +00002467 }
2468
2469 bool RV;
2470 unsigned ResultReg;
2471 RV = ARMEmitLoad(VT, ResultReg, Src);
Eric Christopherfae699a2012-01-11 20:55:27 +00002472    assert(RV == true && "Should be able to handle this load.");
Chad Rosier909cb4f2011-11-14 22:46:17 +00002473 RV = ARMEmitStore(VT, ResultReg, Dest);
Eric Christopherfae699a2012-01-11 20:55:27 +00002474    assert(RV == true && "Should be able to handle this store.");
Duncan Sands5b8a1db2012-02-05 14:20:11 +00002475 (void)RV;
Chad Rosier909cb4f2011-11-14 22:46:17 +00002476
2477 unsigned Size = VT.getSizeInBits()/8;
2478 Len -= Size;
2479 Dest.Offset += Size;
2480 Src.Offset += Size;
2481 }
2482
2483 return true;
2484}
2485
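// Lower the intrinsics FastISel understands: frameaddress, small
// memcpy/memmove, memset, and trap. Everything else is rejected.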
Chad Rosier11add262011-11-11 23:31:03 +00002486bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
2487 // FIXME: Handle more intrinsics.
2488 switch (I.getIntrinsicID()) {
2489 default: return false;
Chad Rosierada759d2012-05-30 17:23:22 +00002490 case Intrinsic::frameaddress: {
2491 MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
2492 MFI->setFrameAddressIsTaken(true);
2493
2494 unsigned LdrOpc;
2495 const TargetRegisterClass *RC;
2496 if (isThumb2) {
2497 LdrOpc = ARM::t2LDRi12;
2498 RC = (const TargetRegisterClass*)&ARM::tGPRRegClass;
2499 } else {
2500 LdrOpc = ARM::LDRi12;
2501 RC = (const TargetRegisterClass*)&ARM::GPRRegClass;
2502 }
2503
2504 const ARMBaseRegisterInfo *RegInfo =
2505 static_cast<const ARMBaseRegisterInfo*>(TM.getRegisterInfo());
2506 unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
2507 unsigned SrcReg = FramePtr;
2508
2509 // Recursively load frame address
2510 // ldr r0 [fp]
2511 // ldr r0 [r0]
2512 // ldr r0 [r0]
2513 // ...
2514 unsigned DestReg;
2515 unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
2516 while (Depth--) {
2517 DestReg = createResultReg(RC);
2518 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2519 TII.get(LdrOpc), DestReg)
2520 .addReg(SrcReg).addImm(0));
2521 SrcReg = DestReg;
2522 }
Chad Rosierbbff4ee2012-06-01 21:12:31 +00002523 UpdateValueMap(&I, SrcReg);
Chad Rosierada759d2012-05-30 17:23:22 +00002524 return true;
2525 }
Chad Rosier11add262011-11-11 23:31:03 +00002526 case Intrinsic::memcpy:
2527 case Intrinsic::memmove: {
Chad Rosier11add262011-11-11 23:31:03 +00002528 const MemTransferInst &MTI = cast<MemTransferInst>(I);
2529 // Don't handle volatile.
2530 if (MTI.isVolatile())
2531 return false;
Chad Rosier909cb4f2011-11-14 22:46:17 +00002532
2533    // Check that this is a memcpy before calling ComputeAddress; we don't
2534    // inline memmoves, and computing the address first would only emit dead code.
2535 bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
2536 if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
Chad Rosier2c42b8c2011-11-14 23:04:09 +00002537      // Small memcpys are common enough that we want to do them without a call
2538 // if possible.
Chad Rosier909cb4f2011-11-14 22:46:17 +00002539 uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
Chad Rosier2c42b8c2011-11-14 23:04:09 +00002540 if (ARMIsMemCpySmall(Len)) {
Chad Rosier909cb4f2011-11-14 22:46:17 +00002541 Address Dest, Src;
2542 if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
2543 !ARMComputeAddress(MTI.getRawSource(), Src))
2544 return false;
Chad Rosierc9758b12012-12-06 01:34:31 +00002545 unsigned Alignment = MTI.getAlignment();
2546 if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
Chad Rosier909cb4f2011-11-14 22:46:17 +00002547 return true;
2548 }
2549 }
Jush Luefc967e2012-06-14 06:08:19 +00002550
Chad Rosier11add262011-11-11 23:31:03 +00002551 if (!MTI.getLength()->getType()->isIntegerTy(32))
2552 return false;
Jush Luefc967e2012-06-14 06:08:19 +00002553
Chad Rosier11add262011-11-11 23:31:03 +00002554 if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
2555 return false;
2556
2557 const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
2558 return SelectCall(&I, IntrMemName);
2559 }
2560 case Intrinsic::memset: {
2561 const MemSetInst &MSI = cast<MemSetInst>(I);
2562 // Don't handle volatile.
2563 if (MSI.isVolatile())
2564 return false;
Jush Luefc967e2012-06-14 06:08:19 +00002565
Chad Rosier11add262011-11-11 23:31:03 +00002566 if (!MSI.getLength()->getType()->isIntegerTy(32))
2567 return false;
Jush Luefc967e2012-06-14 06:08:19 +00002568
Chad Rosier11add262011-11-11 23:31:03 +00002569 if (MSI.getDestAddressSpace() > 255)
2570 return false;
Jush Luefc967e2012-06-14 06:08:19 +00002571
Chad Rosier11add262011-11-11 23:31:03 +00002572 return SelectCall(&I, "memset");
2573 }
Chad Rosier226ddf52012-05-11 21:33:49 +00002574 case Intrinsic::trap: {
Eli Bendersky0f156af2013-01-30 16:30:19 +00002575 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(
2576 Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP));
Chad Rosier226ddf52012-05-11 21:33:49 +00002577 return true;
2578 }
Chad Rosier11add262011-11-11 23:31:03 +00002579 }
Chad Rosier11add262011-11-11 23:31:03 +00002580}
2581
Chad Rosier0d7b2312011-11-02 00:18:48 +00002582bool ARMFastISel::SelectTrunc(const Instruction *I) {
Jush Luefc967e2012-06-14 06:08:19 +00002583 // The high bits for a type smaller than the register size are assumed to be
Chad Rosier0d7b2312011-11-02 00:18:48 +00002584 // undefined.
2585 Value *Op = I->getOperand(0);
2586
2587 EVT SrcVT, DestVT;
2588 SrcVT = TLI.getValueType(Op->getType(), true);
2589 DestVT = TLI.getValueType(I->getType(), true);
2590
2591 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
2592 return false;
2593 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
2594 return false;
2595
2596 unsigned SrcReg = getRegForValue(Op);
2597 if (!SrcReg) return false;
2598
2599 // Because the high bits are undefined, a truncate doesn't generate
2600 // any code.
2601 UpdateValueMap(I, SrcReg);
2602 return true;
2603}
2604
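// Emit a sign or zero extension of SrcReg from SrcVT to DestVT. Depending on
// the subtarget this is either a single extend/and instruction or a
// shift-left / shift-right pair. Returns the result register, or 0 on failure.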
Chad Rosier316a5aa2012-12-17 19:59:43 +00002605unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
Chad Rosier87633022011-11-02 17:20:24 +00002606 bool isZExt) {
Eli Friedman76927d732011-05-25 23:49:02 +00002607 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
Chad Rosier87633022011-11-02 17:20:24 +00002608 return 0;
JF Bastien8fc760c2013-06-07 20:10:37 +00002609 if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1)
Chad Rosier87633022011-11-02 17:20:24 +00002610 return 0;
JF Bastien8fc760c2013-06-07 20:10:37 +00002611
2612 // Table of which combinations can be emitted as a single instruction,
2613 // and which will require two.
2614 static const uint8_t isSingleInstrTbl[3][2][2][2] = {
2615 // ARM Thumb
2616 // !hasV6Ops hasV6Ops !hasV6Ops hasV6Ops
2617 // ext: s z s z s z s z
2618 /* 1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },
2619 /* 8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },
2620 /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }
2621 };
2622
2623  // Target register constraints:
2624  // - For ARM, the destination can never be PC.
2625  // - For 16-bit Thumb, it is restricted to the lower 8 registers.
2626  // - For 32-bit Thumb, it is restricted to non-SP and non-PC.
2627 static const TargetRegisterClass *RCTbl[2][2] = {
2628 // Instructions: Two Single
2629 /* ARM */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
2630 /* Thumb */ { &ARM::tGPRRegClass, &ARM::rGPRRegClass }
2631 };
2632
2633 // Table governing the instruction(s) to be emitted.
2634 static const struct {
2635 // First entry for each of the following is sext, second zext.
2636 uint16_t Opc[2];
2637 uint8_t Imm[2]; // All instructions have either a shift or a mask.
2638 uint8_t hasS[2]; // Some instructions have an S bit, always set it to 0.
2639    uint8_t hasS[2];  // Some instructions have an S bit; always set it to 0.
2640 { // Two instructions (first is left shift, second is in this table).
2641 { // ARM
2642 /* 1 */ { { ARM::ASRi, ARM::LSRi }, { 31, 31 }, { 1, 1 } },
2643 /* 8 */ { { ARM::ASRi, ARM::LSRi }, { 24, 24 }, { 1, 1 } },
2644 /* 16 */ { { ARM::ASRi, ARM::LSRi }, { 16, 16 }, { 1, 1 } }
2645 },
2646 { // Thumb
2647 /* 1 */ { { ARM::tASRri, ARM::tLSRri }, { 31, 31 }, { 0, 0 } },
2648 /* 8 */ { { ARM::tASRri, ARM::tLSRri }, { 24, 24 }, { 0, 0 } },
2649 /* 16 */ { { ARM::tASRri, ARM::tLSRri }, { 16, 16 }, { 0, 0 } }
2650 }
2651 },
2652 { // Single instruction.
2653 { // ARM
2654 /* 1 */ { { ARM::KILL, ARM::ANDri }, { 0, 1 }, { 0, 1 } },
2655 /* 8 */ { { ARM::SXTB, ARM::ANDri }, { 0, 255 }, { 0, 1 } },
2656 /* 16 */ { { ARM::SXTH, ARM::UXTH }, { 0, 0 }, { 0, 0 } }
2657 },
2658 { // Thumb
2659 /* 1 */ { { ARM::KILL, ARM::t2ANDri }, { 0, 1 }, { 0, 1 } },
2660 /* 8 */ { { ARM::t2SXTB, ARM::t2ANDri }, { 0, 255 }, { 0, 1 } },
2661 /* 16 */ { { ARM::t2SXTH, ARM::t2UXTH }, { 0, 0 }, { 0, 0 } }
2662 }
2663 }
2664 };
2665
2666 unsigned SrcBits = SrcVT.getSizeInBits();
2667 unsigned DestBits = DestVT.getSizeInBits();
JF Bastien2c69e902013-06-08 00:51:51 +00002668 (void) DestBits;
JF Bastien8fc760c2013-06-07 20:10:37 +00002669 assert((SrcBits < DestBits) && "can only extend to larger types");
2670 assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
2671 "other sizes unimplemented");
2672 assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
2673 "other sizes unimplemented");
2674
2675 bool hasV6Ops = Subtarget->hasV6Ops();
2676 unsigned Bitness = countTrailingZeros(SrcBits) >> 1; // {1,8,16}=>{0,1,2}
2677 assert((Bitness < 3) && "sanity-check table bounds");
2678
2679 bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
2680 const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr];
2681 unsigned Opc = OpcTbl[isSingleInstr][isThumb2][Bitness].Opc[isZExt];
2682 assert(ARM::KILL != Opc && "Invalid table entry");
2683 unsigned Imm = OpcTbl[isSingleInstr][isThumb2][Bitness].Imm[isZExt];
2684 unsigned hasS = OpcTbl[isSingleInstr][isThumb2][Bitness].hasS[isZExt];
2685
2686 // 16-bit Thumb instructions always set CPSR (unless they're in an IT block).
2687 bool setsCPSR = &ARM::tGPRRegClass == RC;
2688 unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::LSLi;
2689 unsigned ResultReg;
2690
2691 // Either one or two instructions are emitted.
2692 // They're always of the form:
2693 // dst = in OP imm
2694 // CPSR is set only by 16-bit Thumb instructions.
2695 // Predicate, if any, is AL.
2696 // S bit, if available, is always 0.
2697  // When two are emitted, the first's result feeds the second's input;
2698  // that intermediate value is then dead.
2699 unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
2700 for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) {
2701 ResultReg = createResultReg(RC);
2702 unsigned Opcode = ((0 == Instr) && !isSingleInstr) ? LSLOpc : Opc;
2703 bool isKill = 1 == Instr;
2704 MachineInstrBuilder MIB = BuildMI(
2705 *FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opcode), ResultReg);
2706 if (setsCPSR)
2707 MIB.addReg(ARM::CPSR, RegState::Define);
2708 AddDefaultPred(MIB.addReg(SrcReg, isKill * RegState::Kill).addImm(Imm));
2709 if (hasS)
2710 AddDefaultCC(MIB);
2711 // Second instruction consumes the first's result.
2712 SrcReg = ResultReg;
Eli Friedman76927d732011-05-25 23:49:02 +00002713 }
2714
Chad Rosier87633022011-11-02 17:20:24 +00002715 return ResultReg;
2716}
2717
2718bool ARMFastISel::SelectIntExt(const Instruction *I) {
2719 // On ARM, in general, integer casts don't involve legal types; this code
2720 // handles promotable integers.
Chad Rosier87633022011-11-02 17:20:24 +00002721 Type *DestTy = I->getType();
2722 Value *Src = I->getOperand(0);
2723 Type *SrcTy = Src->getType();
2724
Chad Rosier87633022011-11-02 17:20:24 +00002725 bool isZExt = isa<ZExtInst>(I);
2726 unsigned SrcReg = getRegForValue(Src);
2727 if (!SrcReg) return false;
2728
Chad Rosier316a5aa2012-12-17 19:59:43 +00002729 EVT SrcEVT, DestEVT;
2730 SrcEVT = TLI.getValueType(SrcTy, true);
2731 DestEVT = TLI.getValueType(DestTy, true);
2732 if (!SrcEVT.isSimple()) return false;
2733 if (!DestEVT.isSimple()) return false;
Patrik Hagglund3d170e62012-12-17 14:30:06 +00002734
Chad Rosier316a5aa2012-12-17 19:59:43 +00002735 MVT SrcVT = SrcEVT.getSimpleVT();
2736 MVT DestVT = DestEVT.getSimpleVT();
Chad Rosier87633022011-11-02 17:20:24 +00002737 unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
2738 if (ResultReg == 0) return false;
2739 UpdateValueMap(I, ResultReg);
Eli Friedman76927d732011-05-25 23:49:02 +00002740 return true;
2741}
2742
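// Lower shl/lshr/ashr in ARM mode using MOVsi for constant shift amounts and
// MOVsr for register shift amounts; Thumb2 is left to the generic selectors.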
Jush Lu29465492012-08-03 02:37:48 +00002743bool ARMFastISel::SelectShift(const Instruction *I,
2744 ARM_AM::ShiftOpc ShiftTy) {
2745  // Thumb2 mode is handled by the target-independent selector
2746  // or by SelectionDAG ISel.
2747 if (isThumb2)
2748 return false;
2749
2750 // Only handle i32 now.
2751 EVT DestVT = TLI.getValueType(I->getType(), true);
2752 if (DestVT != MVT::i32)
2753 return false;
2754
2755 unsigned Opc = ARM::MOVsr;
2756 unsigned ShiftImm;
2757 Value *Src2Value = I->getOperand(1);
2758 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
2759 ShiftImm = CI->getZExtValue();
2760
2761 // Fall back to selection DAG isel if the shift amount
2762 // is zero or greater than the width of the value type.
2763    // is zero or greater than the width of the value type.
2764    if (ShiftImm == 0 || ShiftImm >= 32)
2765
2766 Opc = ARM::MOVsi;
2767 }
2768
2769 Value *Src1Value = I->getOperand(0);
2770 unsigned Reg1 = getRegForValue(Src1Value);
2771 if (Reg1 == 0) return false;
2772
Nadav Roteme7576402012-09-06 11:13:55 +00002773 unsigned Reg2 = 0;
Jush Lu29465492012-08-03 02:37:48 +00002774 if (Opc == ARM::MOVsr) {
2775 Reg2 = getRegForValue(Src2Value);
2776 if (Reg2 == 0) return false;
2777 }
2778
JF Bastiena9a8a122013-05-29 15:45:47 +00002779 unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
Jush Lu29465492012-08-03 02:37:48 +00002780  if (ResultReg == 0) return false;
2781
2782 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2783 TII.get(Opc), ResultReg)
2784 .addReg(Reg1);
2785
2786 if (Opc == ARM::MOVsi)
2787 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
2788 else if (Opc == ARM::MOVsr) {
2789 MIB.addReg(Reg2);
2790 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
2791 }
2792
2793 AddOptionalDefs(MIB);
2794 UpdateValueMap(I, ResultReg);
2795 return true;
2796}
2797
Eric Christopher56d2b722010-09-02 23:43:26 +00002798// TODO: SoftFP support.
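// Main FastISel entry point: dispatch on the IR opcode to the Select*
// helpers. Returning false hands the instruction back to SelectionDAG.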
Eric Christopherab695882010-07-21 22:26:11 +00002799bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
Eric Christopherac1a19e2010-09-09 01:06:51 +00002800
Eric Christopherab695882010-07-21 22:26:11 +00002801 switch (I->getOpcode()) {
Eric Christopher83007122010-08-23 21:44:12 +00002802 case Instruction::Load:
Eric Christopher43b62be2010-09-27 06:02:23 +00002803 return SelectLoad(I);
Eric Christopher543cf052010-09-01 22:16:27 +00002804 case Instruction::Store:
Eric Christopher43b62be2010-09-27 06:02:23 +00002805 return SelectStore(I);
Eric Christophere5734102010-09-03 00:35:47 +00002806 case Instruction::Br:
Eric Christopher43b62be2010-09-27 06:02:23 +00002807 return SelectBranch(I);
Chad Rosier60c8fa62012-02-07 23:56:08 +00002808 case Instruction::IndirectBr:
2809 return SelectIndirectBr(I);
Eric Christopherd43393a2010-09-08 23:13:45 +00002810 case Instruction::ICmp:
2811 case Instruction::FCmp:
Eric Christopher43b62be2010-09-27 06:02:23 +00002812 return SelectCmp(I);
Eric Christopher46203602010-09-09 00:26:48 +00002813 case Instruction::FPExt:
Eric Christopher43b62be2010-09-27 06:02:23 +00002814 return SelectFPExt(I);
Eric Christopherce07b542010-09-09 20:26:31 +00002815 case Instruction::FPTrunc:
Eric Christopher43b62be2010-09-27 06:02:23 +00002816 return SelectFPTrunc(I);
Eric Christopher9a040492010-09-09 18:54:59 +00002817 case Instruction::SIToFP:
Chad Rosierae46a332012-02-03 21:14:11 +00002818 return SelectIToFP(I, /*isSigned*/ true);
Chad Rosier36b7beb2012-02-03 19:42:52 +00002819 case Instruction::UIToFP:
Chad Rosierae46a332012-02-03 21:14:11 +00002820 return SelectIToFP(I, /*isSigned*/ false);
Eric Christopher9a040492010-09-09 18:54:59 +00002821 case Instruction::FPToSI:
Chad Rosierae46a332012-02-03 21:14:11 +00002822 return SelectFPToI(I, /*isSigned*/ true);
Chad Rosieree8901c2012-02-03 20:27:51 +00002823 case Instruction::FPToUI:
Chad Rosierae46a332012-02-03 21:14:11 +00002824 return SelectFPToI(I, /*isSigned*/ false);
Chad Rosier3901c3e2012-02-06 23:50:07 +00002825 case Instruction::Add:
2826 return SelectBinaryIntOp(I, ISD::ADD);
Chad Rosier6fde8752012-02-08 02:29:21 +00002827 case Instruction::Or:
2828 return SelectBinaryIntOp(I, ISD::OR);
Chad Rosier743e1992012-02-08 02:45:44 +00002829 case Instruction::Sub:
2830 return SelectBinaryIntOp(I, ISD::SUB);
Eric Christopherbc39b822010-09-09 00:53:57 +00002831 case Instruction::FAdd:
Chad Rosier3901c3e2012-02-06 23:50:07 +00002832 return SelectBinaryFPOp(I, ISD::FADD);
Eric Christopherbc39b822010-09-09 00:53:57 +00002833 case Instruction::FSub:
Chad Rosier3901c3e2012-02-06 23:50:07 +00002834 return SelectBinaryFPOp(I, ISD::FSUB);
Eric Christopherbc39b822010-09-09 00:53:57 +00002835 case Instruction::FMul:
Chad Rosier3901c3e2012-02-06 23:50:07 +00002836 return SelectBinaryFPOp(I, ISD::FMUL);
Eric Christopherbb3e5da2010-09-14 23:03:37 +00002837 case Instruction::SDiv:
Chad Rosier7ccb30b2012-02-03 21:07:27 +00002838 return SelectDiv(I, /*isSigned*/ true);
2839 case Instruction::UDiv:
2840 return SelectDiv(I, /*isSigned*/ false);
Eric Christopher6a880d62010-10-11 08:37:26 +00002841 case Instruction::SRem:
Chad Rosier769422f2012-02-03 21:23:45 +00002842 return SelectRem(I, /*isSigned*/ true);
2843 case Instruction::URem:
2844 return SelectRem(I, /*isSigned*/ false);
Eric Christopherf9764fa2010-09-30 20:49:44 +00002845 case Instruction::Call:
Chad Rosier11add262011-11-11 23:31:03 +00002846 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
2847 return SelectIntrinsicCall(*II);
Eric Christopherf9764fa2010-09-30 20:49:44 +00002848 return SelectCall(I);
Eric Christopher3bbd3962010-10-11 08:27:59 +00002849 case Instruction::Select:
2850 return SelectSelect(I);
Eric Christopher4f512ef2010-10-22 01:28:00 +00002851 case Instruction::Ret:
2852 return SelectRet(I);
Eli Friedman76927d732011-05-25 23:49:02 +00002853 case Instruction::Trunc:
Chad Rosier0d7b2312011-11-02 00:18:48 +00002854 return SelectTrunc(I);
Eli Friedman76927d732011-05-25 23:49:02 +00002855 case Instruction::ZExt:
2856 case Instruction::SExt:
Chad Rosier0d7b2312011-11-02 00:18:48 +00002857 return SelectIntExt(I);
Jush Lu29465492012-08-03 02:37:48 +00002858 case Instruction::Shl:
2859 return SelectShift(I, ARM_AM::lsl);
2860 case Instruction::LShr:
2861 return SelectShift(I, ARM_AM::lsr);
2862 case Instruction::AShr:
2863 return SelectShift(I, ARM_AM::asr);
Eric Christopherab695882010-07-21 22:26:11 +00002864 default: break;
2865 }
2866 return false;
2867}
2868
JF Bastien5ab77042013-06-11 22:13:46 +00002869namespace {
2870// This table describes sign- and zero-extend instructions which can be
2871// folded into a preceding load. All of these extends have an immediate
2872// (sometimes a mask and sometimes a shift) that's applied after
2873// extension.
2874const struct FoldableLoadExtendsStruct {
2875 uint16_t Opc[2]; // ARM, Thumb.
2876 uint8_t ExpectedImm;
2877 uint8_t isZExt : 1;
2878 uint8_t ExpectedVT : 7;
2879} FoldableLoadExtends[] = {
2880 { { ARM::SXTH, ARM::t2SXTH }, 0, 0, MVT::i16 },
2881 { { ARM::UXTH, ARM::t2UXTH }, 0, 1, MVT::i16 },
2882 { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8 },
2883 { { ARM::SXTB, ARM::t2SXTB }, 0, 0, MVT::i8 },
2884 { { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 }
2885};
2886}
2887
Eli Bendersky75299e32013-04-19 22:29:18 +00002888/// \brief The specified machine instr operand is a vreg, and that
Chad Rosierb29b9502011-11-13 02:23:59 +00002889/// vreg is being provided by the specified load instruction. If possible,
2890/// try to fold the load as an operand to the instruction, returning true if
2891/// successful.
Eli Bendersky75299e32013-04-19 22:29:18 +00002892bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
2893 const LoadInst *LI) {
Chad Rosierb29b9502011-11-13 02:23:59 +00002894 // Verify we have a legal type before going any further.
2895 MVT VT;
2896 if (!isLoadTypeLegal(LI->getType(), VT))
2897 return false;
2898
2899 // Combine load followed by zero- or sign-extend.
2900 // ldrb r1, [r0] ldrb r1, [r0]
2901 // uxtb r2, r1 =>
2902 // mov r3, r2 mov r3, r1
JF Bastien5ab77042013-06-11 22:13:46 +00002903 if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm())
2904 return false;
2905 const uint64_t Imm = MI->getOperand(2).getImm();
2906
2907 bool Found = false;
2908 bool isZExt;
2909 for (unsigned i = 0, e = array_lengthof(FoldableLoadExtends);
2910 i != e; ++i) {
2911 if (FoldableLoadExtends[i].Opc[isThumb2] == MI->getOpcode() &&
2912 (uint64_t)FoldableLoadExtends[i].ExpectedImm == Imm &&
2913 MVT((MVT::SimpleValueType)FoldableLoadExtends[i].ExpectedVT) == VT) {
2914 Found = true;
2915 isZExt = FoldableLoadExtends[i].isZExt;
2916 }
Chad Rosierb29b9502011-11-13 02:23:59 +00002917 }
JF Bastien5ab77042013-06-11 22:13:46 +00002918 if (!Found) return false;
2919
Chad Rosierb29b9502011-11-13 02:23:59 +00002920 // See if we can handle this address.
2921 Address Addr;
2922 if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;
Jush Luefc967e2012-06-14 06:08:19 +00002923
Chad Rosierb29b9502011-11-13 02:23:59 +00002924 unsigned ResultReg = MI->getOperand(0).getReg();
Chad Rosier8a9bce92011-12-13 19:22:14 +00002925 if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
Chad Rosierb29b9502011-11-13 02:23:59 +00002926 return false;
2927 MI->eraseFromParent();
2928 return true;
2929}
2930
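// Materialize a global address for ELF PIC code: load a GOT or GOTOFF
// constant-pool entry, then either add the global base register (GOTOFF) or
// load through the GOT entry.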
Jush Lu8f506472012-09-27 05:21:41 +00002931unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
Patrik Hagglunda61b17c2012-12-13 06:34:11 +00002932 unsigned Align, MVT VT) {
Jush Lu8f506472012-09-27 05:21:41 +00002933 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
2934 ARMConstantPoolConstant *CPV =
2935 ARMConstantPoolConstant::Create(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);
2936 unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);
2937
2938 unsigned Opc;
2939 unsigned DestReg1 = createResultReg(TLI.getRegClassFor(VT));
2940 // Load value.
2941 if (isThumb2) {
2942 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2943 TII.get(ARM::t2LDRpci), DestReg1)
2944 .addConstantPoolIndex(Idx));
2945 Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs;
2946 } else {
2947 // The extra immediate is for addrmode2.
2948 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2949 DL, TII.get(ARM::LDRcp), DestReg1)
2950 .addConstantPoolIndex(Idx).addImm(0));
2951 Opc = UseGOTOFF ? ARM::ADDrr : ARM::LDRrs;
2952 }
2953
2954 unsigned GlobalBaseReg = AFI->getGlobalBaseReg();
2955 if (GlobalBaseReg == 0) {
2956 GlobalBaseReg = MRI.createVirtualRegister(TLI.getRegClassFor(VT));
2957 AFI->setGlobalBaseReg(GlobalBaseReg);
2958 }
2959
2960 unsigned DestReg2 = createResultReg(TLI.getRegClassFor(VT));
2961 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2962 DL, TII.get(Opc), DestReg2)
2963 .addReg(DestReg1)
2964 .addReg(GlobalBaseReg);
2965 if (!UseGOTOFF)
2966 MIB.addImm(0);
2967 AddOptionalDefs(MIB);
2968
2969 return DestReg2;
2970}
2971
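// Fast-path lowering of incoming formal arguments: only up to four i8/i16/i32
// scalar arguments passed in r0 - r3 are handled; anything else falls back to
// the generic argument lowering.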
Evan Cheng092e5e72013-02-11 01:27:15 +00002972bool ARMFastISel::FastLowerArguments() {
2973 if (!FuncInfo.CanLowerReturn)
2974 return false;
2975
2976 const Function *F = FuncInfo.Fn;
2977 if (F->isVarArg())
2978 return false;
2979
2980 CallingConv::ID CC = F->getCallingConv();
2981 switch (CC) {
2982 default:
2983 return false;
2984 case CallingConv::Fast:
2985 case CallingConv::C:
2986 case CallingConv::ARM_AAPCS_VFP:
2987 case CallingConv::ARM_AAPCS:
2988 case CallingConv::ARM_APCS:
2989 break;
2990 }
2991
2992  // Only handle simple cases, i.e. up to 4 i8/i16/i32 scalar arguments
2993  // that are passed in r0 - r3.
2994 unsigned Idx = 1;
2995 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
2996 I != E; ++I, ++Idx) {
2997 if (Idx > 4)
2998 return false;
2999
3000 if (F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
3001 F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
3002 F->getAttributes().hasAttribute(Idx, Attribute::ByVal))
3003 return false;
3004
3005 Type *ArgTy = I->getType();
3006 if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
3007 return false;
3008
3009 EVT ArgVT = TLI.getValueType(ArgTy);
Chad Rosierfe88aa02013-02-26 01:05:31 +00003010 if (!ArgVT.isSimple()) return false;
Evan Cheng092e5e72013-02-11 01:27:15 +00003011 switch (ArgVT.getSimpleVT().SimpleTy) {
3012 case MVT::i8:
3013 case MVT::i16:
3014 case MVT::i32:
3015 break;
3016 default:
3017 return false;
3018 }
3019 }
3020
3021
3022 static const uint16_t GPRArgRegs[] = {
3023 ARM::R0, ARM::R1, ARM::R2, ARM::R3
3024 };
3025
3026 const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::i32);
3027 Idx = 0;
3028 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
3029 I != E; ++I, ++Idx) {
Evan Cheng092e5e72013-02-11 01:27:15 +00003030 unsigned SrcReg = GPRArgRegs[Idx];
3031 unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
3032 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
3033 // Without this, EmitLiveInCopies may eliminate the livein if its only
3034 // use is a bitcast (which isn't turned into an instruction).
3035 unsigned ResultReg = createResultReg(RC);
3036 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
3037 ResultReg).addReg(DstReg, getKillRegState(true));
3038 UpdateValueMap(I, ResultReg);
3039 }
3040
3041 return true;
3042}
3043
Eric Christopherab695882010-07-21 22:26:11 +00003044namespace llvm {
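  // Factory used by the ARM target to create a FastISel instance; returns
  // null when FastISel isn't supported for the current subtarget.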
Bob Wilsond49edb72012-08-03 04:06:28 +00003045 FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
3046 const TargetLibraryInfo *libInfo) {
Eric Christopherfeadddd2010-10-11 20:05:22 +00003047 const TargetMachine &TM = funcInfo.MF->getTarget();
Jim Grosbach16cb3762010-11-09 19:22:26 +00003048
Eric Christopherfeadddd2010-10-11 20:05:22 +00003049 const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
JF Bastienfe532ad2013-06-14 02:49:43 +00003050 // Thumb2 support on iOS; ARM support on iOS, Linux and NaCl.
3051 bool UseFastISel = false;
3052 UseFastISel |= Subtarget->isTargetIOS() && !Subtarget->isThumb1Only();
3053 UseFastISel |= Subtarget->isTargetLinux() && !Subtarget->isThumb();
3054 UseFastISel |= Subtarget->isTargetNaCl() && !Subtarget->isThumb();
3055
3056 if (UseFastISel) {
3057      // iOS always has a FP for backtracking; force other targets
3058 // to keep their FP when doing FastISel. The emitted code is
3059 // currently superior, and in cases like test-suite's lencod
3060 // FastISel isn't quite correct when FP is eliminated.
3061 TM.Options.NoFramePointerElim = true;
Bob Wilsond49edb72012-08-03 04:06:28 +00003062 return new ARMFastISel(funcInfo, libInfo);
JF Bastienfe532ad2013-06-14 02:49:43 +00003063 }
Evan Cheng09447952010-07-26 18:32:55 +00003064 return 0;
Eric Christopherab695882010-07-21 22:26:11 +00003065 }
3066}