//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the X86-specific support for the FastISel class. Much
// of the target-specific code is generated by tablegen in the file
// X86GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
using namespace llvm;

namespace {

class X86FastISel : public FastISel {
  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

  /// StackPtr - Register used as the stack pointer.
  ///
  unsigned StackPtr;

  /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
  /// floating point ops.
  /// When SSE is available, use it for f32 operations.
  /// When SSE2 is available, use it for f64 operations.
  bool X86ScalarSSEf64;
  bool X86ScalarSSEf32;

public:
  explicit X86FastISel(MachineFunction &mf,
                       MachineModuleInfo *mmi,
                       DwarfWriter *dw,
                       DenseMap<const Value *, unsigned> &vm,
                       DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                       DenseMap<const AllocaInst *, int> &am
#ifndef NDEBUG
                       , SmallSet<Instruction*, 8> &cil
#endif
                       )
    : FastISel(mf, mmi, dw, vm, bm, am
#ifndef NDEBUG
               , cil
#endif
               ) {
    Subtarget = &TM.getSubtarget<X86Subtarget>();
    StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
    X86ScalarSSEf64 = Subtarget->hasSSE2();
    X86ScalarSSEf32 = Subtarget->hasSSE1();
  }

  virtual bool TargetSelectInstruction(Instruction *I);

#include "X86GenFastISel.inc"

private:
  bool X86FastEmitCompare(Value *LHS, Value *RHS, MVT VT);

  bool X86FastEmitLoad(MVT VT, const X86AddressMode &AM, unsigned &RR);

  bool X86FastEmitStore(MVT VT, Value *Val,
                        const X86AddressMode &AM);
  bool X86FastEmitStore(MVT VT, unsigned Val,
                        const X86AddressMode &AM);

  bool X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT, unsigned Src, MVT SrcVT,
                         unsigned &ResultReg);

  bool X86SelectAddress(Value *V, X86AddressMode &AM, bool isCall);

  bool X86SelectLoad(Instruction *I);

  bool X86SelectStore(Instruction *I);

  bool X86SelectCmp(Instruction *I);

  bool X86SelectZExt(Instruction *I);

  bool X86SelectBranch(Instruction *I);

  bool X86SelectShift(Instruction *I);

  bool X86SelectSelect(Instruction *I);

  bool X86SelectTrunc(Instruction *I);

  bool X86SelectFPExt(Instruction *I);
  bool X86SelectFPTrunc(Instruction *I);

  bool X86SelectExtractValue(Instruction *I);

  bool X86VisitIntrinsicCall(CallInst &I, unsigned Intrinsic);
  bool X86SelectCall(Instruction *I);

  CCAssignFn *CCAssignFnForCall(unsigned CC, bool isTailCall = false);

  const X86InstrInfo *getInstrInfo() const {
    return getTargetMachine()->getInstrInfo();
  }
  const X86TargetMachine *getTargetMachine() const {
    return static_cast<const X86TargetMachine *>(&TM);
  }

  unsigned TargetMaterializeConstant(Constant *C);

  unsigned TargetMaterializeAlloca(AllocaInst *C);

  /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
  /// computed in an SSE register, not on the X87 floating point stack.
  bool isScalarFPTypeInSSEReg(MVT VT) const {
    return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
           (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is when SSE1
  }

  bool isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1 = false);
};

} // end anonymous namespace.

bool X86FastISel::isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1) {
  VT = TLI.getValueType(Ty, /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // For now, require SSE/SSE2 for performing floating-point operations,
  // since x87 requires additional work.
  if (VT == MVT::f64 && !X86ScalarSSEf64)
    return false;
  if (VT == MVT::f32 && !X86ScalarSSEf32)
    return false;
  // Similarly, no f80 support yet.
  if (VT == MVT::f80)
    return false;
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
}

#include "X86GenCallingConv.inc"

/// CCAssignFnForCall - Selects the correct CCAssignFn for a given calling
/// convention.
CCAssignFn *X86FastISel::CCAssignFnForCall(unsigned CC, bool isTailCall) {
  if (Subtarget->is64Bit()) {
    if (Subtarget->isTargetWin64())
      return CC_X86_Win64_C;
    else if (CC == CallingConv::Fast && isTailCall)
      return CC_X86_64_TailCall;
    else
      return CC_X86_64_C;
  }

  if (CC == CallingConv::X86_FastCall)
    return CC_X86_32_FastCall;
  else if (CC == CallingConv::Fast)
    return CC_X86_32_FastCC;
  else
    return CC_X86_32_C;
}

/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
/// Return true and the result register by reference if it is possible.
bool X86FastISel::X86FastEmitLoad(MVT VT, const X86AddressMode &AM,
                                  unsigned &ResultReg) {
  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC  = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    // No f80 support yet.
    return false;
  }

  ResultReg = createResultReg(RC);
  addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
  return true;
}

/// X86FastEmitStore - Emit a machine instruction to store a value Val of
/// type VT. The address is either pre-computed, consisting of a base ptr, Ptr,
/// and a displacement offset, or a GlobalAddress, i.e. V.
/// Return true if it is possible.
bool
X86FastISel::X86FastEmitStore(MVT VT, unsigned Val,
                              const X86AddressMode &AM) {
  // Get opcode and regclass of the output for the given store instruction.
  unsigned Opc = 0;
  switch (VT.getSimpleVT()) {
  case MVT::f80: // No f80 support yet.
  default: return false;
  case MVT::i8:  Opc = X86::MOV8mr;  break;
  case MVT::i16: Opc = X86::MOV16mr; break;
  case MVT::i32: Opc = X86::MOV32mr; break;
  case MVT::i64: Opc = X86::MOV64mr; break; // Must be in x86-64 mode.
  case MVT::f32:
    Opc = Subtarget->hasSSE1() ? X86::MOVSSmr : X86::ST_Fp32m;
    break;
  case MVT::f64:
    Opc = Subtarget->hasSSE2() ? X86::MOVSDmr : X86::ST_Fp64m;
    break;
  }

  addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM).addReg(Val);
  return true;
}

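/// X86FastEmitStore - Emit a machine instruction to store the Value Val of
/// type VT to the address AM. Simple constants (and 'null') are folded into
/// an immediate-form store where the opcode allows it; otherwise the value
/// is materialized into a register first.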
bool X86FastISel::X86FastEmitStore(MVT VT, Value *Val,
                                   const X86AddressMode &AM) {
  // Handle 'null' like i32/i64 0.
  if (isa<ConstantPointerNull>(Val))
    Val = Constant::getNullValue(TD.getIntPtrType());

  // If this is a store of a simple constant, fold the constant into the store.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    unsigned Opc = 0;
    switch (VT.getSimpleVT()) {
    default: break;
    case MVT::i8:  Opc = X86::MOV8mi;  break;
    case MVT::i16: Opc = X86::MOV16mi; break;
    case MVT::i32: Opc = X86::MOV32mi; break;
    case MVT::i64:
      // Must be a 32-bit sign extended value.
      if ((int)CI->getSExtValue() == CI->getSExtValue())
        Opc = X86::MOV64mi32;
      break;
    }

    if (Opc) {
      addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM)
        .addImm(CI->getSExtValue());
      return true;
    }
  }

  unsigned ValReg = getRegForValue(Val);
  if (ValReg == 0)
    return false;

  return X86FastEmitStore(VT, ValReg, AM);
}

/// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
/// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
/// ISD::SIGN_EXTEND).
bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT,
                                    unsigned Src, MVT SrcVT,
                                    unsigned &ResultReg) {
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src);

  if (RR != 0) {
    ResultReg = RR;
    return true;
  } else
    return false;
}

/// X86SelectAddress - Attempt to fill in an address from the given value.
///
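/// When isCall is true the address is being formed for a call target, so
/// allocas, adds, and GEPs are not folded into the address.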
bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM, bool isCall) {
  User *U;
  unsigned Opcode = Instruction::UserOp1;
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Opcode = I->getOpcode();
    U = I;
  } else if (ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();
    U = C;
  }

  switch (Opcode) {
  default: break;
  case Instruction::BitCast:
    // Look past bitcasts.
    return X86SelectAddress(U->getOperand(0), AM, isCall);

  case Instruction::IntToPtr:
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return X86SelectAddress(U->getOperand(0), AM, isCall);
    break;

  case Instruction::PtrToInt:
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return X86SelectAddress(U->getOperand(0), AM, isCall);
    break;

  case Instruction::Alloca: {
    if (isCall) break;
    // Do static allocas.
    const AllocaInst *A = cast<AllocaInst>(V);
    DenseMap<const AllocaInst*, int>::iterator SI = StaticAllocaMap.find(A);
    if (SI != StaticAllocaMap.end()) {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = SI->second;
      return true;
    }
    break;
  }

  case Instruction::Add: {
    if (isCall) break;
    // Adds of constants are common and easy enough.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
      // They have to fit in the 32-bit signed displacement field though.
      if (isInt32(Disp)) {
        AM.Disp = (uint32_t)Disp;
        return X86SelectAddress(U->getOperand(0), AM, isCall);
      }
    }
    break;
  }

  case Instruction::GetElementPtr: {
    if (isCall) break;
    // Pattern-match simple GEPs.
    uint64_t Disp = (int32_t)AM.Disp;
    unsigned IndexReg = AM.IndexReg;
    unsigned Scale = AM.Scale;
    gep_type_iterator GTI = gep_type_begin(U);
    // Iterate through the indices, folding what we can. Constants can be
    // folded, and one dynamic index can be handled, if the scale is supported.
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      Value *Op = *i;
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        Disp += SL->getElementOffset(Idx);
      } else {
        uint64_t S = TD.getTypePaddedSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
          // Constant-offset addressing.
          Disp += CI->getSExtValue() * S;
        } else if (IndexReg == 0 &&
                   (!AM.GV ||
                    !getTargetMachine()->symbolicAddressesAreRIPRel()) &&
                   (S == 1 || S == 2 || S == 4 || S == 8)) {
          // Scaled-index addressing.
          Scale = S;
          IndexReg = getRegForGEPIndex(Op);
          if (IndexReg == 0)
            return false;
        } else
          // Unsupported.
          goto unsupported_gep;
      }
    }
    // Check for displacement overflow.
    if (!isInt32(Disp))
      break;
    // Ok, the GEP indices were covered by constant-offset and scaled-index
    // addressing. Update the address state and move on to examining the base.
    AM.IndexReg = IndexReg;
    AM.Scale = Scale;
    AM.Disp = (uint32_t)Disp;
    return X86SelectAddress(U->getOperand(0), AM, isCall);
  unsupported_gep:
    // Ok, the GEP indices weren't all covered.
    break;
  }
  }

  // Handle constant address.
  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // Can't handle alternate code models yet.
    if (TM.getCodeModel() != CodeModel::Default &&
        TM.getCodeModel() != CodeModel::Small)
      return false;

    // RIP-relative addresses can't have additional register operands.
    if (getTargetMachine()->symbolicAddressesAreRIPRel() &&
        (AM.Base.Reg != 0 || AM.IndexReg != 0))
      return false;

    // Can't handle TLS yet.
    if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
      if (GVar->isThreadLocal())
        return false;

    // Set up the basic address.
    AM.GV = GV;
    if (!isCall &&
        TM.getRelocationModel() == Reloc::PIC_ &&
        !Subtarget->is64Bit())
      AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(&MF);

    // Emit an extra load if the ABI requires it.
    if (Subtarget->GVRequiresExtraLoad(GV, TM, isCall)) {
      // Check to see if we've already materialized this
      // value in a register in this block.
      if (unsigned Reg = LocalValueMap[V]) {
        AM.Base.Reg = Reg;
        AM.GV = 0;
        return true;
      }
      // Issue load from stub if necessary.
      unsigned Opc = 0;
      const TargetRegisterClass *RC = NULL;
      if (TLI.getPointerTy() == MVT::i32) {
        Opc = X86::MOV32rm;
        RC  = X86::GR32RegisterClass;
      } else {
        Opc = X86::MOV64rm;
        RC  = X86::GR64RegisterClass;
      }

      X86AddressMode StubAM;
      StubAM.Base.Reg = AM.Base.Reg;
      StubAM.GV = AM.GV;
      unsigned ResultReg = createResultReg(RC);
      addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), StubAM);

      // Now construct the final address. Note that the Disp, Scale,
      // and Index values may already be set here.
      AM.Base.Reg = ResultReg;
      AM.GV = 0;

      // Prevent loading GV stub multiple times in same MBB.
      LocalValueMap[V] = AM.Base.Reg;
    }
    return true;
  }

  // If all else fails, try to materialize the value in a register.
  if (!AM.GV || !getTargetMachine()->symbolicAddressesAreRIPRel()) {
    if (AM.Base.Reg == 0) {
      AM.Base.Reg = getRegForValue(V);
      return AM.Base.Reg != 0;
    }
    if (AM.IndexReg == 0) {
      assert(AM.Scale == 1 && "Scale with no index!");
      AM.IndexReg = getRegForValue(V);
      return AM.IndexReg != 0;
    }
  }

  return false;
}

/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(Instruction* I) {
  MVT VT;
  if (!isTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  X86AddressMode AM;
  if (!X86SelectAddress(I->getOperand(1), AM, false))
    return false;

  return X86FastEmitStore(VT, I->getOperand(0), AM);
}

/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  X86AddressMode AM;
  if (!X86SelectAddress(I->getOperand(0), AM, false))
    return false;

  unsigned ResultReg = 0;
  if (X86FastEmitLoad(VT, AM, ResultReg)) {
    UpdateValueMap(I, ResultReg);
    return true;
  }
  return false;
}

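/// X86ChooseCmpOpcode - Return the register/register compare opcode for the
/// given type: CMPrr for integers, UCOMIS for scalar floating point, or 0 if
/// the type cannot be compared directly.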
static unsigned X86ChooseCmpOpcode(MVT VT) {
  switch (VT.getSimpleVT()) {
  default:       return 0;
  case MVT::i8:  return X86::CMP8rr;
  case MVT::i16: return X86::CMP16rr;
  case MVT::i32: return X86::CMP32rr;
  case MVT::i64: return X86::CMP64rr;
  case MVT::f32: return X86::UCOMISSrr;
  case MVT::f64: return X86::UCOMISDrr;
  }
}

/// X86ChooseCmpImmediateOpcode - If we have a comparison with RHS as the RHS
/// of the comparison, return an opcode that works for the compare (e.g.
/// CMP32ri) otherwise return 0.
static unsigned X86ChooseCmpImmediateOpcode(MVT VT, ConstantInt *RHSC) {
  switch (VT.getSimpleVT()) {
  // Otherwise, we can't fold the immediate into this comparison.
  default: return 0;
  case MVT::i8:  return X86::CMP8ri;
  case MVT::i16: return X86::CMP16ri;
  case MVT::i32: return X86::CMP32ri;
  case MVT::i64:
    // 64-bit comparisons are only valid if the immediate fits in a 32-bit sext
    // field.
    if ((int)RHSC->getSExtValue() == RHSC->getSExtValue())
      return X86::CMP64ri32;
    return 0;
  }
}

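/// X86FastEmitCompare - Emit a compare of Op0 against Op1, setting EFLAGS.
/// A constant integer RHS is folded into an immediate-form compare when
/// possible; returns false if an operand could not be handled.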
bool X86FastISel::X86FastEmitCompare(Value *Op0, Value *Op1, MVT VT) {
  unsigned Op0Reg = getRegForValue(Op0);
  if (Op0Reg == 0) return false;

  // Handle 'null' like i32/i64 0.
  if (isa<ConstantPointerNull>(Op1))
    Op1 = Constant::getNullValue(TD.getIntPtrType());

  // We have two options: compare with register or immediate. If the RHS of
  // the compare is an immediate that we can fold into this compare, use
  // CMPri, otherwise use CMPrr.
  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
      BuildMI(MBB, DL, TII.get(CompareImmOpc)).addReg(Op0Reg)
        .addImm(Op1C->getSExtValue());
      return true;
    }
  }

  unsigned CompareOpc = X86ChooseCmpOpcode(VT);
  if (CompareOpc == 0) return false;

  unsigned Op1Reg = getRegForValue(Op1);
  if (Op1Reg == 0) return false;
  BuildMI(MBB, DL, TII.get(CompareOpc)).addReg(Op0Reg).addReg(Op1Reg);

  return true;
}

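/// X86SelectCmp - Select an icmp/fcmp instruction: emit a compare of the two
/// operands and materialize the boolean result into an 8-bit register with
/// the matching SETcc (e.g. icmp ult becomes CMP + SETB); FCMP_OEQ and
/// FCMP_UNE need a pair of SETcc instructions combined with AND/OR.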
bool X86FastISel::X86SelectCmp(Instruction *I) {
  CmpInst *CI = cast<CmpInst>(I);

  MVT VT;
  if (!isTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  unsigned ResultReg = createResultReg(&X86::GR8RegClass);
  unsigned SetCCOpc;
  bool SwapArgs;  // false -> compare Op0, Op1.  true -> compare Op1, Op0.
  switch (CI->getPredicate()) {
  case CmpInst::FCMP_OEQ: {
    if (!X86FastEmitCompare(CI->getOperand(0), CI->getOperand(1), VT))
      return false;

    unsigned EReg = createResultReg(&X86::GR8RegClass);
    unsigned NPReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, DL, TII.get(X86::SETEr), EReg);
    BuildMI(MBB, DL, TII.get(X86::SETNPr), NPReg);
    BuildMI(MBB, DL,
            TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
    UpdateValueMap(I, ResultReg);
    return true;
  }
  case CmpInst::FCMP_UNE: {
    if (!X86FastEmitCompare(CI->getOperand(0), CI->getOperand(1), VT))
      return false;

    unsigned NEReg = createResultReg(&X86::GR8RegClass);
    unsigned PReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, DL, TII.get(X86::SETNEr), NEReg);
    BuildMI(MBB, DL, TII.get(X86::SETPr), PReg);
    BuildMI(MBB, DL, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
    UpdateValueMap(I, ResultReg);
    return true;
  }
  case CmpInst::FCMP_OGT: SwapArgs = false; SetCCOpc = X86::SETAr;  break;
  case CmpInst::FCMP_OGE: SwapArgs = false; SetCCOpc = X86::SETAEr; break;
  case CmpInst::FCMP_OLT: SwapArgs = true;  SetCCOpc = X86::SETAr;  break;
  case CmpInst::FCMP_OLE: SwapArgs = true;  SetCCOpc = X86::SETAEr; break;
  case CmpInst::FCMP_ONE: SwapArgs = false; SetCCOpc = X86::SETNEr; break;
  case CmpInst::FCMP_ORD: SwapArgs = false; SetCCOpc = X86::SETNPr; break;
  case CmpInst::FCMP_UNO: SwapArgs = false; SetCCOpc = X86::SETPr;  break;
  case CmpInst::FCMP_UEQ: SwapArgs = false; SetCCOpc = X86::SETEr;  break;
  case CmpInst::FCMP_UGT: SwapArgs = true;  SetCCOpc = X86::SETBr;  break;
  case CmpInst::FCMP_UGE: SwapArgs = true;  SetCCOpc = X86::SETBEr; break;
  case CmpInst::FCMP_ULT: SwapArgs = false; SetCCOpc = X86::SETBr;  break;
  case CmpInst::FCMP_ULE: SwapArgs = false; SetCCOpc = X86::SETBEr; break;

  case CmpInst::ICMP_EQ:  SwapArgs = false; SetCCOpc = X86::SETEr;  break;
  case CmpInst::ICMP_NE:  SwapArgs = false; SetCCOpc = X86::SETNEr; break;
  case CmpInst::ICMP_UGT: SwapArgs = false; SetCCOpc = X86::SETAr;  break;
  case CmpInst::ICMP_UGE: SwapArgs = false; SetCCOpc = X86::SETAEr; break;
  case CmpInst::ICMP_ULT: SwapArgs = false; SetCCOpc = X86::SETBr;  break;
  case CmpInst::ICMP_ULE: SwapArgs = false; SetCCOpc = X86::SETBEr; break;
  case CmpInst::ICMP_SGT: SwapArgs = false; SetCCOpc = X86::SETGr;  break;
  case CmpInst::ICMP_SGE: SwapArgs = false; SetCCOpc = X86::SETGEr; break;
  case CmpInst::ICMP_SLT: SwapArgs = false; SetCCOpc = X86::SETLr;  break;
  case CmpInst::ICMP_SLE: SwapArgs = false; SetCCOpc = X86::SETLEr; break;
  default:
    return false;
  }

  Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
  if (SwapArgs)
    std::swap(Op0, Op1);

  // Emit a compare of Op0/Op1.
  if (!X86FastEmitCompare(Op0, Op1, VT))
    return false;

  BuildMI(MBB, DL, TII.get(SetCCOpc), ResultReg);
  UpdateValueMap(I, ResultReg);
  return true;
}

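/// X86SelectZExt - Select a zext instruction. Only the i1-to-i8 case is
/// handled here, since the i1 producers already leave the upper bits of the
/// i8 register zeroed, so the source register can be reused directly.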
bool X86FastISel::X86SelectZExt(Instruction *I) {
  // Special-case hack: The only i1 values we know how to produce currently
  // set the upper bits of an i8 value to zero.
  if (I->getType() == Type::Int8Ty &&
      I->getOperand(0)->getType() == Type::Int1Ty) {
    unsigned ResultReg = getRegForValue(I->getOperand(0));
    if (ResultReg == 0) return false;
    UpdateValueMap(I, ResultReg);
    return true;
  }

  return false;
}

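/// X86SelectBranch - Select a conditional branch. Where possible the
/// condition is folded into the branch itself: a single-use compare becomes
/// a CMP + Jcc, and an add-with-overflow intrinsic result becomes a JO/JB;
/// otherwise the condition register is re-tested with TEST8rr + JNE.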
bool X86FastISel::X86SelectBranch(Instruction *I) {
  // Unconditional branches are selected by tablegen-generated code.
  // Handle a conditional branch.
  BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TrueMBB = MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FalseMBB = MBBMap[BI->getSuccessor(1)];

  // Fold the common case of a conditional branch with a comparison.
  if (CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse()) {
      MVT VT = TLI.getValueType(CI->getOperand(0)->getType());

      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (MBB->isLayoutSuccessor(TrueMBB)) {
        std::swap(TrueMBB, FalseMBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      bool SwapArgs;      // false -> compare Op0, Op1.  true -> compare Op1, Op0.
      unsigned BranchOpc; // Opcode to jump on, e.g. "X86::JA"

      switch (Predicate) {
      case CmpInst::FCMP_OEQ:
        std::swap(TrueMBB, FalseMBB);
        Predicate = CmpInst::FCMP_UNE;
        // FALL THROUGH
      case CmpInst::FCMP_UNE: SwapArgs = false; BranchOpc = X86::JNE; break;
      case CmpInst::FCMP_OGT: SwapArgs = false; BranchOpc = X86::JA;  break;
      case CmpInst::FCMP_OGE: SwapArgs = false; BranchOpc = X86::JAE; break;
      case CmpInst::FCMP_OLT: SwapArgs = true;  BranchOpc = X86::JA;  break;
      case CmpInst::FCMP_OLE: SwapArgs = true;  BranchOpc = X86::JAE; break;
      case CmpInst::FCMP_ONE: SwapArgs = false; BranchOpc = X86::JNE; break;
      case CmpInst::FCMP_ORD: SwapArgs = false; BranchOpc = X86::JNP; break;
      case CmpInst::FCMP_UNO: SwapArgs = false; BranchOpc = X86::JP;  break;
      case CmpInst::FCMP_UEQ: SwapArgs = false; BranchOpc = X86::JE;  break;
      case CmpInst::FCMP_UGT: SwapArgs = true;  BranchOpc = X86::JB;  break;
      case CmpInst::FCMP_UGE: SwapArgs = true;  BranchOpc = X86::JBE; break;
      case CmpInst::FCMP_ULT: SwapArgs = false; BranchOpc = X86::JB;  break;
      case CmpInst::FCMP_ULE: SwapArgs = false; BranchOpc = X86::JBE; break;

      case CmpInst::ICMP_EQ:  SwapArgs = false; BranchOpc = X86::JE;  break;
      case CmpInst::ICMP_NE:  SwapArgs = false; BranchOpc = X86::JNE; break;
      case CmpInst::ICMP_UGT: SwapArgs = false; BranchOpc = X86::JA;  break;
      case CmpInst::ICMP_UGE: SwapArgs = false; BranchOpc = X86::JAE; break;
      case CmpInst::ICMP_ULT: SwapArgs = false; BranchOpc = X86::JB;  break;
      case CmpInst::ICMP_ULE: SwapArgs = false; BranchOpc = X86::JBE; break;
      case CmpInst::ICMP_SGT: SwapArgs = false; BranchOpc = X86::JG;  break;
      case CmpInst::ICMP_SGE: SwapArgs = false; BranchOpc = X86::JGE; break;
      case CmpInst::ICMP_SLT: SwapArgs = false; BranchOpc = X86::JL;  break;
      case CmpInst::ICMP_SLE: SwapArgs = false; BranchOpc = X86::JLE; break;
      default:
        return false;
      }

      Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
      if (SwapArgs)
        std::swap(Op0, Op1);

      // Emit a compare of the LHS and RHS, setting the flags.
      if (!X86FastEmitCompare(Op0, Op1, VT))
        return false;

      BuildMI(MBB, DL, TII.get(BranchOpc)).addMBB(TrueMBB);

      if (Predicate == CmpInst::FCMP_UNE) {
        // X86 requires a second branch to handle UNE (and OEQ,
        // which is mapped to UNE above).
        BuildMI(MBB, DL, TII.get(X86::JP)).addMBB(TrueMBB);
      }

      FastEmitBranch(FalseMBB);
      MBB->addSuccessor(TrueMBB);
      return true;
    }
  } else if (ExtractValueInst *EI =
             dyn_cast<ExtractValueInst>(BI->getCondition())) {
    // Check to see if the branch instruction is from an "arithmetic with
    // overflow" intrinsic. The main way these intrinsics are used is:
    //
    // %t = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
    // %sum = extractvalue { i32, i1 } %t, 0
    // %obit = extractvalue { i32, i1 } %t, 1
    // br i1 %obit, label %overflow, label %normal
    //
    // The %sum and %obit are converted into an ADD and a SETO/SETB before
    // reaching the branch. Therefore, we search backwards through the MBB
    // looking for the SETO/SETB instruction. If an instruction modifies the
    // EFLAGS register before we reach the SETO/SETB instruction, then we can't
    // convert the branch into a JO/JB instruction.

    Value *Agg = EI->getAggregateOperand();

    if (CallInst *CI = dyn_cast<CallInst>(Agg)) {
      Function *F = CI->getCalledFunction();

      if (F && F->isDeclaration()) {
        switch (F->getIntrinsicID()) {
        default: break;
        case Intrinsic::sadd_with_overflow:
        case Intrinsic::uadd_with_overflow: {
          const MachineInstr *SetMI = 0;
          unsigned Reg = lookUpRegForValue(EI);

          for (MachineBasicBlock::const_reverse_iterator
                 RI = MBB->rbegin(), RE = MBB->rend(); RI != RE; ++RI) {
            const MachineInstr &MI = *RI;

            if (MI.modifiesRegister(Reg)) {
              unsigned Src, Dst, SrcSR, DstSR;

              if (getInstrInfo()->isMoveInstr(MI, Src, Dst, SrcSR, DstSR)) {
                Reg = Src;
                continue;
              }

              SetMI = &MI;
              break;
            }

            const TargetInstrDesc &TID = MI.getDesc();
            const unsigned *ImpDefs = TID.getImplicitDefs();

            if (TID.hasUnmodeledSideEffects()) break;

            bool ModifiesEFlags = false;

            if (ImpDefs) {
              for (unsigned u = 0; ImpDefs[u]; ++u)
                if (ImpDefs[u] == X86::EFLAGS) {
                  ModifiesEFlags = true;
                  break;
                }
            }

            if (ModifiesEFlags) break;
          }

          if (SetMI) {
            unsigned OpCode = SetMI->getOpcode();

            if (OpCode == X86::SETOr || OpCode == X86::SETBr) {
              BuildMI(MBB, DL, TII.get((OpCode == X86::SETOr) ?
                                       X86::JO : X86::JB)).addMBB(TrueMBB);
              FastEmitBranch(FalseMBB);
              MBB->addSuccessor(TrueMBB);
              return true;
            }
          }
        }
        }
      }
    }
  }

  // Otherwise do a clumsy setcc and re-test it.
  unsigned OpReg = getRegForValue(BI->getCondition());
  if (OpReg == 0) return false;

  BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
  BuildMI(MBB, DL, TII.get(X86::JNE)).addMBB(TrueMBB);
  FastEmitBranch(FalseMBB);
  MBB->addSuccessor(TrueMBB);
  return true;
}

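/// X86SelectShift - Select a shl/lshr/ashr instruction, folding a constant
/// shift amount into the immediate form (e.g. "shl i32 %x, 3" becomes a
/// single SHL32ri) and otherwise shifting by the amount in CL.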
bool X86FastISel::X86SelectShift(Instruction *I) {
  unsigned CReg = 0, OpReg = 0, OpImm = 0;
  const TargetRegisterClass *RC = NULL;
  if (I->getType() == Type::Int8Ty) {
    CReg = X86::CL;
    RC = &X86::GR8RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR8rCL; OpImm = X86::SHR8ri; break;
    case Instruction::AShr: OpReg = X86::SAR8rCL; OpImm = X86::SAR8ri; break;
    case Instruction::Shl:  OpReg = X86::SHL8rCL; OpImm = X86::SHL8ri; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int16Ty) {
    CReg = X86::CX;
    RC = &X86::GR16RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR16rCL; OpImm = X86::SHR16ri; break;
    case Instruction::AShr: OpReg = X86::SAR16rCL; OpImm = X86::SAR16ri; break;
    case Instruction::Shl:  OpReg = X86::SHL16rCL; OpImm = X86::SHL16ri; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int32Ty) {
    CReg = X86::ECX;
    RC = &X86::GR32RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR32rCL; OpImm = X86::SHR32ri; break;
    case Instruction::AShr: OpReg = X86::SAR32rCL; OpImm = X86::SAR32ri; break;
    case Instruction::Shl:  OpReg = X86::SHL32rCL; OpImm = X86::SHL32ri; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int64Ty) {
    CReg = X86::RCX;
    RC = &X86::GR64RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR64rCL; OpImm = X86::SHR64ri; break;
    case Instruction::AShr: OpReg = X86::SAR64rCL; OpImm = X86::SAR64ri; break;
    case Instruction::Shl:  OpReg = X86::SHL64rCL; OpImm = X86::SHL64ri; break;
    default: return false;
    }
  } else {
    return false;
  }

  MVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;

  // Fold immediate in shl(x,3).
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = createResultReg(RC);
    BuildMI(MBB, DL, TII.get(OpImm),
            ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff);
    UpdateValueMap(I, ResultReg);
    return true;
  }

  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC);

  // The shift instruction uses X86::CL. If we defined a super-register
  // of X86::CL, emit an EXTRACT_SUBREG to precisely describe what
  // we're doing here.
  if (CReg != X86::CL)
    BuildMI(MBB, DL, TII.get(TargetInstrInfo::EXTRACT_SUBREG), X86::CL)
      .addReg(CReg).addImm(X86::SUBREG_8BIT);

  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, DL, TII.get(OpReg), ResultReg).addReg(Op0Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}

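/// X86SelectSelect - Select a 'select' instruction by testing the condition
/// register and emitting a CMOV between the two value operands.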
bool X86FastISel::X86SelectSelect(Instruction *I) {
  MVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
    return false;

  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  if (VT.getSimpleVT() == MVT::i16) {
    Opc = X86::CMOVE16rr;
    RC = &X86::GR16RegClass;
  } else if (VT.getSimpleVT() == MVT::i32) {
    Opc = X86::CMOVE32rr;
    RC = &X86::GR32RegClass;
  } else if (VT.getSimpleVT() == MVT::i64) {
    Opc = X86::CMOVE64rr;
    RC = &X86::GR64RegClass;
  } else {
    return false;
  }

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, DL, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}

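/// X86SelectFPExt - Select an fpext from f32 to f64 using CVTSS2SD; this is
/// only done when SSE2 is available.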
bool X86FastISel::X86SelectFPExt(Instruction *I) {
  // fpext from float to double.
  if (Subtarget->hasSSE2() && I->getType() == Type::DoubleTy) {
    Value *V = I->getOperand(0);
    if (V->getType() == Type::FloatTy) {
      unsigned OpReg = getRegForValue(V);
      if (OpReg == 0) return false;
      unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
      BuildMI(MBB, DL, TII.get(X86::CVTSS2SDrr), ResultReg).addReg(OpReg);
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  return false;
}

bool X86FastISel::X86SelectFPTrunc(Instruction *I) {
  if (Subtarget->hasSSE2()) {
    if (I->getType() == Type::FloatTy) {
      Value *V = I->getOperand(0);
      if (V->getType() == Type::DoubleTy) {
        unsigned OpReg = getRegForValue(V);
        if (OpReg == 0) return false;
        unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
        BuildMI(MBB, DL, TII.get(X86::CVTSD2SSrr), ResultReg).addReg(OpReg);
        UpdateValueMap(I, ResultReg);
        return true;
      }
    }
  }

  return false;
}

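/// X86SelectTrunc - Select a trunc from i16/i32 to i8 on x86-32 by first
/// copying the source into a byte-addressable register class and then
/// extracting the low 8-bit subregister.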
bool X86FastISel::X86SelectTrunc(Instruction *I) {
  if (Subtarget->is64Bit())
    // All other cases should be handled by the tblgen generated code.
    return false;
  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  MVT DstVT = TLI.getValueType(I->getType());
  if (DstVT != MVT::i8)
    // All other cases should be handled by the tblgen generated code.
    return false;
  if (SrcVT != MVT::i16 && SrcVT != MVT::i32)
    // All other cases should be handled by the tblgen generated code.
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // First issue a copy to GR16_ or GR32_.
  unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16to16_ : X86::MOV32to32_;
  const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
    ? X86::GR16_RegisterClass : X86::GR32_RegisterClass;
  unsigned CopyReg = createResultReg(CopyRC);
  BuildMI(MBB, DL, TII.get(CopyOpc), CopyReg).addReg(InputReg);

  // Then issue an extract_subreg.
  unsigned ResultReg = FastEmitInst_extractsubreg(DstVT.getSimpleVT(),
                                                  CopyReg, X86::SUBREG_8BIT);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

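/// X86SelectExtractValue - Select an extractvalue from the result of an
/// add-with-overflow intrinsic, relying on X86VisitIntrinsicCall having
/// allocated the sum and overflow-bit registers sequentially.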
bool X86FastISel::X86SelectExtractValue(Instruction *I) {
  ExtractValueInst *EI = cast<ExtractValueInst>(I);
  Value *Agg = EI->getAggregateOperand();

  if (CallInst *CI = dyn_cast<CallInst>(Agg)) {
    Function *F = CI->getCalledFunction();

    if (F && F->isDeclaration()) {
      switch (F->getIntrinsicID()) {
      default: break;
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::uadd_with_overflow:
        // Cheat a little. We know that the registers for "add" and "seto" are
        // allocated sequentially. However, we only keep track of the register
        // for "add" in the value map. Use extractvalue's index to get the
        // correct register for "seto".
        UpdateValueMap(I, lookUpRegForValue(Agg) + *EI->idx_begin());
        return true;
      }
    }
  }

  return false;
}

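/// X86VisitIntrinsicCall - Lower an intrinsic call. Currently only the
/// sadd/uadd.with.overflow intrinsics are handled, by emitting an ADD
/// followed by a SETO/SETB into consecutively allocated result registers.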
bool X86FastISel::X86VisitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
  // FIXME: Handle more intrinsics.
  switch (Intrinsic) {
  default: return false;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow: {
    // Replace "add with overflow" intrinsics with an "add" instruction
    // followed by a seto/setc instruction. Later on, when the "extractvalue"
    // instructions are encountered, we use the fact that two registers were
    // created sequentially to get the correct registers for the "sum" and the
    // "overflow bit".
    MVT VT;
    const Function *Callee = I.getCalledFunction();
    const Type *RetTy =
      cast<StructType>(Callee->getReturnType())->getTypeAtIndex(unsigned(0));

    if (!isTypeLegal(RetTy, VT))
      return false;

    Value *Op1 = I.getOperand(1);
    Value *Op2 = I.getOperand(2);
    unsigned Reg1 = getRegForValue(Op1);
    unsigned Reg2 = getRegForValue(Op2);

    if (Reg1 == 0 || Reg2 == 0)
      // FIXME: Handle values *not* in registers.
      return false;

    unsigned OpC = 0;

    if (VT == MVT::i32)
      OpC = X86::ADD32rr;
    else if (VT == MVT::i64)
      OpC = X86::ADD64rr;
    else
      return false;

    unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(MBB, DL, TII.get(OpC), ResultReg).addReg(Reg1).addReg(Reg2);
    UpdateValueMap(&I, ResultReg);

    ResultReg = createResultReg(TLI.getRegClassFor(MVT::i8));
    BuildMI(MBB, DL, TII.get((Intrinsic == Intrinsic::sadd_with_overflow) ?
                             X86::SETOr : X86::SETBr), ResultReg);
    return true;
  }
  }
}

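/// X86SelectCall - Select a call instruction. Intrinsic calls are dispatched
/// to X86VisitIntrinsicCall; cases FastISel cannot handle (inline asm,
/// varargs, unusual calling conventions or argument attributes) are rejected,
/// and the rest are lowered by materializing the callee address, analyzing
/// the outgoing arguments, and emitting the call sequence directly.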
Evan Chengf3d4efe2008-09-07 09:09:33 +00001108bool X86FastISel::X86SelectCall(Instruction *I) {
1109 CallInst *CI = cast<CallInst>(I);
1110 Value *Callee = I->getOperand(0);
1111
1112 // Can't handle inline asm yet.
1113 if (isa<InlineAsm>(Callee))
1114 return false;
1115
Bill Wendling52370a12008-12-09 02:42:50 +00001116 // Handle intrinsic calls.
1117 if (Function *F = CI->getCalledFunction())
1118 if (F->isDeclaration())
1119 if (unsigned IID = F->getIntrinsicID())
1120 return X86VisitIntrinsicCall(*CI, IID);
Evan Chengf3d4efe2008-09-07 09:09:33 +00001121
Evan Chengf3d4efe2008-09-07 09:09:33 +00001122 // Handle only C and fastcc calling conventions for now.
1123 CallSite CS(CI);
1124 unsigned CC = CS.getCallingConv();
1125 if (CC != CallingConv::C &&
1126 CC != CallingConv::Fast &&
1127 CC != CallingConv::X86_FastCall)
1128 return false;
1129
1130 // Let SDISel handle vararg functions.
1131 const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
1132 const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
1133 if (FTy->isVarArg())
1134 return false;
1135
1136 // Handle *simple* calls for now.
1137 const Type *RetTy = CS.getType();
1138 MVT RetVT;
Dan Gohmanb5b6ec62008-09-17 21:18:49 +00001139 if (RetTy == Type::VoidTy)
1140 RetVT = MVT::isVoid;
Chris Lattner160f6cc2008-10-15 05:07:36 +00001141 else if (!isTypeLegal(RetTy, RetVT, true))
Evan Chengf3d4efe2008-09-07 09:09:33 +00001142 return false;
1143
Dan Gohmanb5b6ec62008-09-17 21:18:49 +00001144 // Materialize callee address in a register. FIXME: GV address can be
1145 // handled with a CALLpcrel32 instead.
Dan Gohman2ff7fd12008-09-19 22:16:54 +00001146 X86AddressMode CalleeAM;
1147 if (!X86SelectAddress(Callee, CalleeAM, true))
1148 return false;
Dan Gohmanb5b6ec62008-09-17 21:18:49 +00001149 unsigned CalleeOp = 0;
Dan Gohman2ff7fd12008-09-19 22:16:54 +00001150 GlobalValue *GV = 0;
1151 if (CalleeAM.Base.Reg != 0) {
1152 assert(CalleeAM.GV == 0);
1153 CalleeOp = CalleeAM.Base.Reg;
1154 } else if (CalleeAM.GV != 0) {
1155 assert(CalleeAM.GV != 0);
1156 GV = CalleeAM.GV;
1157 } else
1158 return false;
Dan Gohmanb5b6ec62008-09-17 21:18:49 +00001159
Evan Chengdebdea02008-09-08 17:15:42 +00001160 // Allow calls which produce i1 results.
1161 bool AndToI1 = false;
1162 if (RetVT == MVT::i1) {
1163 RetVT = MVT::i8;
1164 AndToI1 = true;
1165 }
1166
Evan Chengf3d4efe2008-09-07 09:09:33 +00001167 // Deal with call operands first.
Chris Lattner241ab472008-10-15 05:38:32 +00001168 SmallVector<Value*, 8> ArgVals;
1169 SmallVector<unsigned, 8> Args;
1170 SmallVector<MVT, 8> ArgVTs;
1171 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
Evan Chengf3d4efe2008-09-07 09:09:33 +00001172 Args.reserve(CS.arg_size());
Chris Lattner241ab472008-10-15 05:38:32 +00001173 ArgVals.reserve(CS.arg_size());
Evan Chengf3d4efe2008-09-07 09:09:33 +00001174 ArgVTs.reserve(CS.arg_size());
1175 ArgFlags.reserve(CS.arg_size());
1176 for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
1177 i != e; ++i) {
1178 unsigned Arg = getRegForValue(*i);
1179 if (Arg == 0)
1180 return false;
1181 ISD::ArgFlagsTy Flags;
1182 unsigned AttrInd = i - CS.arg_begin() + 1;
Devang Patel05988662008-09-25 21:00:45 +00001183 if (CS.paramHasAttr(AttrInd, Attribute::SExt))
Evan Chengf3d4efe2008-09-07 09:09:33 +00001184 Flags.setSExt();
Devang Patel05988662008-09-25 21:00:45 +00001185 if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
Evan Chengf3d4efe2008-09-07 09:09:33 +00001186 Flags.setZExt();
1187
1188 // FIXME: Only handle *easy* calls for now.
Devang Patel05988662008-09-25 21:00:45 +00001189 if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
1190 CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
1191 CS.paramHasAttr(AttrInd, Attribute::Nest) ||
1192 CS.paramHasAttr(AttrInd, Attribute::ByVal))
Evan Chengf3d4efe2008-09-07 09:09:33 +00001193 return false;
1194
1195 const Type *ArgTy = (*i)->getType();
1196 MVT ArgVT;
Chris Lattner160f6cc2008-10-15 05:07:36 +00001197 if (!isTypeLegal(ArgTy, ArgVT))
Evan Chengf3d4efe2008-09-07 09:09:33 +00001198 return false;
1199 unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
1200 Flags.setOrigAlign(OriginalAlignment);
1201
1202 Args.push_back(Arg);
Chris Lattner241ab472008-10-15 05:38:32 +00001203 ArgVals.push_back(*i);
Evan Chengf3d4efe2008-09-07 09:09:33 +00001204 ArgVTs.push_back(ArgVT);
1205 ArgFlags.push_back(Flags);
1206 }
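  // Args, ArgVals, ArgVTs and ArgFlags are parallel vectors, one entry per
  // call argument; CCInfo below assigns each index either a physical
  // register or a stack offset.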
1207
1208 // Analyze operands of the call, assigning locations to each operand.
1209 SmallVector<CCValAssign, 16> ArgLocs;
1210 CCState CCInfo(CC, false, TM, ArgLocs);
1211 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC));
1212
1213 // Get a count of how many bytes are to be pushed on the stack.
1214 unsigned NumBytes = CCInfo.getNextStackOffset();
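  // NumBytes is the total stack space the outgoing arguments need; the same
  // amount is passed to the CALLSEQ_START issued below and to the matching
  // CALLSEQ_END after the call.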
1215
1216 // Issue CALLSEQ_START
Dan Gohman6d4b0522008-10-01 18:28:06 +00001217 unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
Dale Johannesen8d13f8f2009-02-13 02:33:27 +00001218 BuildMI(MBB, DL, TII.get(AdjStackDown)).addImm(NumBytes);
Evan Chengf3d4efe2008-09-07 09:09:33 +00001219
Chris Lattner438949a2008-10-15 05:30:52 +00001220  // Process the arguments: walk the register/memloc assignments, inserting
Evan Chengf3d4efe2008-09-07 09:09:33 +00001221 // copies / loads.
1222 SmallVector<unsigned, 4> RegArgs;
1223 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1224 CCValAssign &VA = ArgLocs[i];
1225 unsigned Arg = Args[VA.getValNo()];
1226 MVT ArgVT = ArgVTs[VA.getValNo()];
1227
1228 // Promote the value if needed.
1229 switch (VA.getLocInfo()) {
1230 default: assert(0 && "Unknown loc info!");
1231 case CCValAssign::Full: break;
Evan Cheng24e3a902008-09-08 06:35:17 +00001232 case CCValAssign::SExt: {
1233 bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
1234 Arg, ArgVT, Arg);
Chris Lattnera33649e2008-12-19 17:03:38 +00001235 assert(Emitted && "Failed to emit a sext!"); Emitted=Emitted;
Devang Patelfd1c6c32008-12-23 21:56:28 +00001236 Emitted = true;
Evan Cheng24e3a902008-09-08 06:35:17 +00001237 ArgVT = VA.getLocVT();
Evan Chengf3d4efe2008-09-07 09:09:33 +00001238 break;
Evan Cheng24e3a902008-09-08 06:35:17 +00001239 }
1240 case CCValAssign::ZExt: {
1241 bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
1242 Arg, ArgVT, Arg);
Chris Lattnera33649e2008-12-19 17:03:38 +00001243 assert(Emitted && "Failed to emit a zext!"); Emitted=Emitted;
Devang Patelfd1c6c32008-12-23 21:56:28 +00001244 Emitted = true;
Evan Cheng24e3a902008-09-08 06:35:17 +00001245 ArgVT = VA.getLocVT();
Evan Chengf3d4efe2008-09-07 09:09:33 +00001246 break;
Evan Cheng24e3a902008-09-08 06:35:17 +00001247 }
1248 case CCValAssign::AExt: {
1249 bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
1250 Arg, ArgVT, Arg);
Owen Andersonb6369132008-09-11 02:41:37 +00001251 if (!Emitted)
1252 Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
Chris Lattner160f6cc2008-10-15 05:07:36 +00001253 Arg, ArgVT, Arg);
Owen Andersonb6369132008-09-11 02:41:37 +00001254 if (!Emitted)
1255 Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
1256 Arg, ArgVT, Arg);
1257
Chris Lattnera33649e2008-12-19 17:03:38 +00001258      assert(Emitted && "Failed to emit an aext!"); Emitted=Emitted;
Evan Cheng24e3a902008-09-08 06:35:17 +00001259 ArgVT = VA.getLocVT();
Evan Chengf3d4efe2008-09-07 09:09:33 +00001260 break;
1261 }
Evan Cheng24e3a902008-09-08 06:35:17 +00001262 }
Evan Chengf3d4efe2008-09-07 09:09:33 +00001263
1264 if (VA.isRegLoc()) {
1265 TargetRegisterClass* RC = TLI.getRegClassFor(ArgVT);
1266 bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), VA.getLocReg(),
1267 Arg, RC, RC);
Chris Lattnera33649e2008-12-19 17:03:38 +00001268 assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
Devang Patelfd1c6c32008-12-23 21:56:28 +00001269 Emitted = true;
Evan Chengf3d4efe2008-09-07 09:09:33 +00001270 RegArgs.push_back(VA.getLocReg());
1271 } else {
1272 unsigned LocMemOffset = VA.getLocMemOffset();
Dan Gohman0586d912008-09-10 20:11:02 +00001273 X86AddressMode AM;
1274 AM.Base.Reg = StackPtr;
1275 AM.Disp = LocMemOffset;
Chris Lattner241ab472008-10-15 05:38:32 +00001276 Value *ArgVal = ArgVals[VA.getValNo()];
1277
1278 // If this is a really simple value, emit this with the Value* version of
1279 // X86FastEmitStore. If it isn't simple, we don't want to do this, as it
1280 // can cause us to reevaluate the argument.
1281 if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal))
1282 X86FastEmitStore(ArgVT, ArgVal, AM);
1283 else
1284 X86FastEmitStore(ArgVT, Arg, AM);
Evan Chengf3d4efe2008-09-07 09:09:33 +00001285 }
1286 }
1287
Dan Gohman2cc3aa42008-09-25 15:24:26 +00001288  // ELF / PIC requires the GOT pointer to be live in EBX before making a
1289  // function call through the PLT.
1290 if (!Subtarget->is64Bit() &&
1291 TM.getRelocationModel() == Reloc::PIC_ &&
1292 Subtarget->isPICStyleGOT()) {
1293 TargetRegisterClass *RC = X86::GR32RegisterClass;
Dan Gohman57c3dac2008-09-30 00:58:23 +00001294 unsigned Base = getInstrInfo()->getGlobalBaseReg(&MF);
Dan Gohman2cc3aa42008-09-25 15:24:26 +00001295 bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), X86::EBX, Base, RC, RC);
Chris Lattnera33649e2008-12-19 17:03:38 +00001296 assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
Devang Patelfd1c6c32008-12-23 21:56:28 +00001297 Emitted = true;
Dan Gohman2cc3aa42008-09-25 15:24:26 +00001298 }
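  // (This copy into EBX is only needed for 32-bit PIC code using the GOT;
  // the call instruction built below also lists EBX as an implicit use so
  // the register stays live across the call.)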
1299
Evan Chengf3d4efe2008-09-07 09:09:33 +00001300 // Issue the call.
1301 unsigned CallOpc = CalleeOp
1302 ? (Subtarget->is64Bit() ? X86::CALL64r : X86::CALL32r)
1303 : (Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32);
1304 MachineInstrBuilder MIB = CalleeOp
Dale Johannesen8d13f8f2009-02-13 02:33:27 +00001305 ? BuildMI(MBB, DL, TII.get(CallOpc)).addReg(CalleeOp)
1306 : BuildMI(MBB, DL, TII.get(CallOpc)).addGlobalAddress(GV);
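  // Indirect calls (callee already in a register) use CALL32r/CALL64r and
  // take the register operand; direct calls use the pc-relative
  // CALLpcrel32/CALL64pcrel32 forms and take the global address instead.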
Dan Gohman2cc3aa42008-09-25 15:24:26 +00001307
1308  // Add an implicit use of the GOT pointer in EBX.
1309 if (!Subtarget->is64Bit() &&
1310 TM.getRelocationModel() == Reloc::PIC_ &&
1311 Subtarget->isPICStyleGOT())
1312 MIB.addReg(X86::EBX);
1313
Evan Chengf3d4efe2008-09-07 09:09:33 +00001314 // Add implicit physical register uses to the call.
Dan Gohman8c3f8b62008-10-07 22:10:33 +00001315 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
1316 MIB.addReg(RegArgs[i]);
Evan Chengf3d4efe2008-09-07 09:09:33 +00001317
1318 // Issue CALLSEQ_END
Dan Gohman6d4b0522008-10-01 18:28:06 +00001319 unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
Dale Johannesen8d13f8f2009-02-13 02:33:27 +00001320 BuildMI(MBB, DL, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);
Evan Chengf3d4efe2008-09-07 09:09:33 +00001321
1322 // Now handle call return value (if any).
Evan Chengf3d4efe2008-09-07 09:09:33 +00001323 if (RetVT.getSimpleVT() != MVT::isVoid) {
1324 SmallVector<CCValAssign, 16> RVLocs;
1325 CCState CCInfo(CC, false, TM, RVLocs);
1326 CCInfo.AnalyzeCallResult(RetVT, RetCC_X86);
1327
1328 // Copy all of the result registers out of their specified physreg.
1329 assert(RVLocs.size() == 1 && "Can't handle multi-value calls!");
1330 MVT CopyVT = RVLocs[0].getValVT();
1331 TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
1332 TargetRegisterClass *SrcRC = DstRC;
1333
1334 // If this is a call to a function that returns an fp value on the x87 fp
1335 // stack, but where we prefer to use the value in xmm registers, copy it
1336 // out as F80 and use a truncate to move it from fp stack reg to xmm reg.
1337 if ((RVLocs[0].getLocReg() == X86::ST0 ||
1338 RVLocs[0].getLocReg() == X86::ST1) &&
1339 isScalarFPTypeInSSEReg(RVLocs[0].getValVT())) {
1340 CopyVT = MVT::f80;
1341 SrcRC = X86::RSTRegisterClass;
1342 DstRC = X86::RFP80RegisterClass;
1343 }
1344
1345 unsigned ResultReg = createResultReg(DstRC);
1346 bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
1347 RVLocs[0].getLocReg(), DstRC, SrcRC);
Chris Lattnera33649e2008-12-19 17:03:38 +00001348 assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
Devang Patelfd1c6c32008-12-23 21:56:28 +00001349 Emitted = true;
Evan Chengf3d4efe2008-09-07 09:09:33 +00001350 if (CopyVT != RVLocs[0].getValVT()) {
1351      // Round the F80 to the right size, which also moves it to the appropriate xmm
1352 // register. This is accomplished by storing the F80 value in memory and
1353 // then loading it back. Ewww...
1354 MVT ResVT = RVLocs[0].getValVT();
1355 unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
1356 unsigned MemSize = ResVT.getSizeInBits()/8;
Dan Gohman0586d912008-09-10 20:11:02 +00001357 int FI = MFI.CreateStackObject(MemSize, MemSize);
Dale Johannesen8d13f8f2009-02-13 02:33:27 +00001358 addFrameReference(BuildMI(MBB, DL, TII.get(Opc)), FI).addReg(ResultReg);
Evan Chengf3d4efe2008-09-07 09:09:33 +00001359 DstRC = ResVT == MVT::f32
1360 ? X86::FR32RegisterClass : X86::FR64RegisterClass;
1361 Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
1362 ResultReg = createResultReg(DstRC);
Dale Johannesen8d13f8f2009-02-13 02:33:27 +00001363 addFrameReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), FI);
Evan Chengf3d4efe2008-09-07 09:09:33 +00001364 }
1365
Evan Chengdebdea02008-09-08 17:15:42 +00001366 if (AndToI1) {
1367      // Mask out all but the lowest bit for a call that produces an i1 result.
1368 unsigned AndResult = createResultReg(X86::GR8RegisterClass);
Dale Johannesen8d13f8f2009-02-13 02:33:27 +00001369 BuildMI(MBB, DL,
1370 TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
Evan Chengdebdea02008-09-08 17:15:42 +00001371 ResultReg = AndResult;
1372 }
1373
Evan Chengf3d4efe2008-09-07 09:09:33 +00001374 UpdateValueMap(I, ResultReg);
1375 }
1376
1377 return true;
1378}
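// Illustrative examples of what the fast path above accepts: a plain,
// non-vararg call such as
//   %r = call i32 @foo(i32 %x, i32 %y)
// is selected directly, while calls through inline asm, vararg callees, or
// arguments marked inreg/sret/nest/byval all return false so that the
// SelectionDAG instruction selector handles them instead.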
1379
1380
Dan Gohman99b21822008-08-28 23:21:34 +00001381bool
Dan Gohman3df24e62008-09-03 23:12:08 +00001382X86FastISel::TargetSelectInstruction(Instruction *I) {
Dan Gohman99b21822008-08-28 23:21:34 +00001383 switch (I->getOpcode()) {
1384 default: break;
Evan Cheng8b19e562008-09-03 06:44:39 +00001385 case Instruction::Load:
Dan Gohman3df24e62008-09-03 23:12:08 +00001386 return X86SelectLoad(I);
Owen Anderson79924eb2008-09-04 16:48:33 +00001387 case Instruction::Store:
1388 return X86SelectStore(I);
Dan Gohman6e3f05f2008-09-04 23:26:51 +00001389 case Instruction::ICmp:
1390 case Instruction::FCmp:
1391 return X86SelectCmp(I);
Dan Gohmand89ae992008-09-05 01:06:14 +00001392 case Instruction::ZExt:
1393 return X86SelectZExt(I);
1394 case Instruction::Br:
1395 return X86SelectBranch(I);
Evan Chengf3d4efe2008-09-07 09:09:33 +00001396 case Instruction::Call:
1397 return X86SelectCall(I);
Dan Gohmanc39f4db2008-09-05 18:30:08 +00001398 case Instruction::LShr:
1399 case Instruction::AShr:
1400 case Instruction::Shl:
1401 return X86SelectShift(I);
1402 case Instruction::Select:
1403 return X86SelectSelect(I);
Evan Cheng10a8d9c2008-09-07 08:47:42 +00001404 case Instruction::Trunc:
1405 return X86SelectTrunc(I);
Dan Gohman78efce62008-09-10 21:02:08 +00001406 case Instruction::FPExt:
1407 return X86SelectFPExt(I);
1408 case Instruction::FPTrunc:
1409 return X86SelectFPTrunc(I);
Bill Wendling52370a12008-12-09 02:42:50 +00001410 case Instruction::ExtractValue:
1411 return X86SelectExtractValue(I);
Dan Gohman99b21822008-08-28 23:21:34 +00001412 }
1413
1414 return false;
1415}
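// Opcodes not handled above return false, which tells the target-independent
// FastISel driver to give up on this instruction and let the SelectionDAG
// selector handle it.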
1416
Dan Gohman0586d912008-09-10 20:11:02 +00001417unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
Evan Cheng59fbc802008-09-09 01:26:59 +00001418 MVT VT;
Chris Lattner160f6cc2008-10-15 05:07:36 +00001419 if (!isTypeLegal(C->getType(), VT))
Owen Anderson95267a12008-09-05 00:06:23 +00001420    return 0;
1421
1422 // Get opcode and regclass of the output for the given load instruction.
1423 unsigned Opc = 0;
1424 const TargetRegisterClass *RC = NULL;
1425 switch (VT.getSimpleVT()) {
1426 default: return false;
1427 case MVT::i8:
1428 Opc = X86::MOV8rm;
1429 RC = X86::GR8RegisterClass;
1430 break;
1431 case MVT::i16:
1432 Opc = X86::MOV16rm;
1433 RC = X86::GR16RegisterClass;
1434 break;
1435 case MVT::i32:
1436 Opc = X86::MOV32rm;
1437 RC = X86::GR32RegisterClass;
1438 break;
1439 case MVT::i64:
1440 // Must be in x86-64 mode.
1441 Opc = X86::MOV64rm;
1442 RC = X86::GR64RegisterClass;
1443 break;
1444 case MVT::f32:
1445 if (Subtarget->hasSSE1()) {
1446 Opc = X86::MOVSSrm;
1447 RC = X86::FR32RegisterClass;
1448 } else {
1449 Opc = X86::LD_Fp32m;
1450 RC = X86::RFP32RegisterClass;
1451 }
1452 break;
1453 case MVT::f64:
1454 if (Subtarget->hasSSE2()) {
1455 Opc = X86::MOVSDrm;
1456 RC = X86::FR64RegisterClass;
1457 } else {
1458 Opc = X86::LD_Fp64m;
1459 RC = X86::RFP64RegisterClass;
1460 }
1461 break;
1462 case MVT::f80:
Dan Gohman5af29c22008-09-26 01:39:32 +00001463 // No f80 support yet.
1464 return false;
Owen Anderson95267a12008-09-05 00:06:23 +00001465 }
1466
Dan Gohman2ff7fd12008-09-19 22:16:54 +00001467 // Materialize addresses with LEA instructions.
Owen Anderson95267a12008-09-05 00:06:23 +00001468 if (isa<GlobalValue>(C)) {
Dan Gohman2ff7fd12008-09-19 22:16:54 +00001469 X86AddressMode AM;
1470 if (X86SelectAddress(C, AM, false)) {
1471 if (TLI.getPointerTy() == MVT::i32)
1472 Opc = X86::LEA32r;
1473 else
1474 Opc = X86::LEA64r;
1475 unsigned ResultReg = createResultReg(RC);
Dale Johannesen8d13f8f2009-02-13 02:33:27 +00001476 addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
Owen Anderson95267a12008-09-05 00:06:23 +00001477 return ResultReg;
Dan Gohman2ff7fd12008-09-19 22:16:54 +00001478 }
Evan Cheng0de588f2008-09-05 21:00:03 +00001479 return 0;
Owen Anderson95267a12008-09-05 00:06:23 +00001480 }
1481
Owen Anderson3b217c62008-09-06 01:11:01 +00001482 // MachineConstantPool wants an explicit alignment.
Dan Gohman1fbc3cd2008-09-18 18:26:43 +00001483 unsigned Align = TD.getPreferredTypeAlignmentShift(C->getType());
Owen Anderson3b217c62008-09-06 01:11:01 +00001484 if (Align == 0) {
1485 // Alignment of vector types. FIXME!
Duncan Sandsceb4d1a2009-01-12 20:38:59 +00001486 Align = TD.getTypePaddedSize(C->getType());
Owen Anderson3b217c62008-09-06 01:11:01 +00001487 Align = Log2_64(Align);
1488 }
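  // For example, an f32 or f64 ConstantFP is placed in the constant pool
  // below and reloaded with MOVSSrm/MOVSDrm when SSE is available
  // (LD_Fp32m/LD_Fp64m on x87), using the opcode and register class chosen
  // in the switch above.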
Owen Anderson95267a12008-09-05 00:06:23 +00001489
Dan Gohman5396c992008-09-30 01:21:32 +00001490 // x86-32 PIC requires a PIC base register for constant pools.
1491 unsigned PICBase = 0;
1492 if (TM.getRelocationModel() == Reloc::PIC_ &&
1493 !Subtarget->is64Bit())
1494 PICBase = getInstrInfo()->getGlobalBaseReg(&MF);
1495
1496 // Create the load from the constant pool.
Dan Gohman0586d912008-09-10 20:11:02 +00001497 unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
Dan Gohman2ff7fd12008-09-19 22:16:54 +00001498 unsigned ResultReg = createResultReg(RC);
Dale Johannesen8d13f8f2009-02-13 02:33:27 +00001499 addConstantPoolReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), MCPOffset,
Dan Gohman5396c992008-09-30 01:21:32 +00001500 PICBase);
1501
Owen Anderson95267a12008-09-05 00:06:23 +00001502 return ResultReg;
1503}
1504
Dan Gohman0586d912008-09-10 20:11:02 +00001505unsigned X86FastISel::TargetMaterializeAlloca(AllocaInst *C) {
Dan Gohman4e6ed5e2008-10-03 01:27:49 +00001506 // Fail on dynamic allocas. At this point, getRegForValue has already
1507 // checked its CSE maps, so if we're here trying to handle a dynamic
1508 // alloca, we're not going to succeed. X86SelectAddress has a
1509 // check for dynamic allocas, because it's called directly from
1510 // various places, but TargetMaterializeAlloca also needs a check
1511 // in order to avoid recursion between getRegForValue,
1512  // X86SelectAddress, and TargetMaterializeAlloca.
1513 if (!StaticAllocaMap.count(C))
1514 return 0;
1515
Dan Gohman0586d912008-09-10 20:11:02 +00001516 X86AddressMode AM;
Dan Gohman2ff7fd12008-09-19 22:16:54 +00001517 if (!X86SelectAddress(C, AM, false))
Dan Gohman0586d912008-09-10 20:11:02 +00001518 return 0;
1519 unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
1520 TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
1521 unsigned ResultReg = createResultReg(RC);
Dale Johannesen8d13f8f2009-02-13 02:33:27 +00001522 addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
Dan Gohman0586d912008-09-10 20:11:02 +00001523 return ResultReg;
1524}
1525
Evan Chengc3f44b02008-09-03 00:03:49 +00001526namespace llvm {
Dan Gohman3df24e62008-09-03 23:12:08 +00001527 llvm::FastISel *X86::createFastISel(MachineFunction &mf,
Dan Gohmand57dd5f2008-09-23 21:53:34 +00001528 MachineModuleInfo *mmi,
Devang Patel83489bb2009-01-13 00:35:13 +00001529 DwarfWriter *dw,
Dan Gohman3df24e62008-09-03 23:12:08 +00001530 DenseMap<const Value *, unsigned> &vm,
Dan Gohman0586d912008-09-10 20:11:02 +00001531 DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
Dan Gohmandd5b58a2008-10-14 23:54:11 +00001532 DenseMap<const AllocaInst *, int> &am
1533#ifndef NDEBUG
1534 , SmallSet<Instruction*, 8> &cil
1535#endif
1536 ) {
Devang Patel83489bb2009-01-13 00:35:13 +00001537 return new X86FastISel(mf, mmi, dw, vm, bm, am
Dan Gohmandd5b58a2008-10-14 23:54:11 +00001538#ifndef NDEBUG
1539 , cil
1540#endif
1541 );
Evan Chengc3f44b02008-09-03 00:03:49 +00001542 }
Dan Gohman99b21822008-08-28 23:21:34 +00001543}