//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the X86-specific support for the FastISel class. Much
// of the target-specific code is generated by tablegen in the file
// X86GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Instructions.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

class X86FastISel : public FastISel {
  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

public:
  explicit X86FastISel(MachineFunction &mf,
                       DenseMap<const Value *, unsigned> &vm,
                       DenseMap<const BasicBlock *, MachineBasicBlock *> &bm)
    : FastISel(mf, vm, bm) {
    Subtarget = &TM.getSubtarget<X86Subtarget>();
  }

  virtual bool TargetSelectInstruction(Instruction *I);

#include "X86GenFastISel.inc"

private:
  bool X86SelectConstAddr(Value *V, unsigned &Op0);

  bool X86SelectLoad(Instruction *I);

  bool X86SelectStore(Instruction *I);

  bool X86SelectCmp(Instruction *I);

  bool X86SelectZExt(Instruction *I);

  bool X86SelectBranch(Instruction *I);

  unsigned TargetSelectConstantPoolLoad(Constant *C, MachineConstantPool* MCP);
};

/// X86SelectConstAddr - Select and emit code to materialize a constant
/// address.
///
bool X86FastISel::X86SelectConstAddr(Value *V,
                                     unsigned &Op0) {
  // FIXME: Only GlobalAddress for now.
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  if (!GV)
    return false;

  if (Subtarget->GVRequiresExtraLoad(GV, TM, false)) {
    // Issue load from stub if necessary.
    unsigned Opc = 0;
    const TargetRegisterClass *RC = NULL;
    if (TLI.getPointerTy() == MVT::i32) {
      Opc = X86::MOV32rm;
      RC = X86::GR32RegisterClass;
    } else {
      Opc = X86::MOV64rm;
      RC = X86::GR64RegisterClass;
    }
    Op0 = createResultReg(RC);
    X86AddressMode AM;
    AM.GV = GV;
    addFullAddress(BuildMI(MBB, TII.get(Opc), Op0), AM);
    // Prevent loading GV stub multiple times in same MBB.
    LocalValueMap[V] = Op0;
  }
  return true;
}

/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(Instruction *I) {
  MVT VT = MVT::getMVT(I->getOperand(0)->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT))
    return false;
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  Value *V = I->getOperand(1);
  unsigned Op1 = getRegForValue(V);
  if (Op1 == 0) {
    // Handle constant store address.
    if (!isa<Constant>(V) || !X86SelectConstAddr(V, Op1))
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // Get the opcode and register class for the value being stored.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8mr;
    RC = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16mr;
    RC = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32mr;
    RC = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64mr;
    RC = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSmr;
      RC = X86::FR32RegisterClass;
    } else {
      Opc = X86::ST_Fp32m;
      RC = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDmr;
      RC = X86::FR64RegisterClass;
    } else {
      Opc = X86::ST_Fp64m;
      RC = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::ST_FP80m;
    RC = X86::RFP80RegisterClass;
    break;
  }

  X86AddressMode AM;
  if (Op1)
    // Address is in register.
    AM.Base.Reg = Op1;
  else
    AM.GV = cast<GlobalValue>(V);
  addFullAddress(BuildMI(MBB, TII.get(Opc)), AM).addReg(Op0);
  return true;
}

/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(Instruction *I) {
  MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT))
    return false;

  Value *V = I->getOperand(0);
  unsigned Op0 = getRegForValue(V);
  if (Op0 == 0) {
    // Handle constant load address.
    if (!isa<Constant>(V) || !X86SelectConstAddr(V, Op0))
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::LD_Fp80m;
    RC = X86::RFP80RegisterClass;
    break;
  }

  unsigned ResultReg = createResultReg(RC);
  X86AddressMode AM;
  if (Op0)
    // Address is in register.
    AM.Base.Reg = Op0;
  else
    AM.GV = cast<GlobalValue>(V);
  addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
  UpdateValueMap(I, ResultReg);
  return true;
}

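/// X86SelectCmp - Select and emit code to implement compare instructions. The
/// i1 result is materialized into a GR8 register using SETcc instructions.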
bool X86FastISel::X86SelectCmp(Instruction *I) {
  CmpInst *CI = cast<CmpInst>(I);

  MVT VT = TLI.getValueType(I->getOperand(0)->getType());
  if (!TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(CI->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(CI->getOperand(1));
  if (Op1Reg == 0) return false;

  unsigned Opc;
  switch (VT.getSimpleVT()) {
  case MVT::i8: Opc = X86::CMP8rr; break;
  case MVT::i16: Opc = X86::CMP16rr; break;
  case MVT::i32: Opc = X86::CMP32rr; break;
  case MVT::i64: Opc = X86::CMP64rr; break;
  case MVT::f32: Opc = X86::UCOMISSrr; break;
  case MVT::f64: Opc = X86::UCOMISDrr; break;
  default: return false;
  }

  unsigned ResultReg = createResultReg(&X86::GR8RegClass);
  switch (CI->getPredicate()) {
  case CmpInst::FCMP_OEQ: {
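    // Ordered equal: UCOMIS sets ZF=1 and PF=0 (PF=1 would mean an unordered
    // result), so AND together SETE and SETNP.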
    unsigned EReg = createResultReg(&X86::GR8RegClass);
    unsigned NPReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), EReg);
    BuildMI(MBB, TII.get(X86::SETNPr), NPReg);
    BuildMI(MBB, TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
    break;
  }
  case CmpInst::FCMP_UNE: {
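    // Unordered not-equal: true when ZF=0 (not equal) or PF=1 (unordered),
    // so OR together SETNE and SETP.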
    unsigned NEReg = createResultReg(&X86::GR8RegClass);
    unsigned PReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), NEReg);
    BuildMI(MBB, TII.get(X86::SETPr), PReg);
    BuildMI(MBB, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
    break;
  }
  case CmpInst::FCMP_OGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::FCMP_OGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
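  // For the ordered "less than" predicates, swap the operands and test the
  // "above" conditions (CF=0): the "below" flags are also set for unordered
  // results, so they cannot be used directly for an ordered compare.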
  case CmpInst::FCMP_OLT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::FCMP_OLE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::FCMP_ONE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
    break;
  case CmpInst::FCMP_ORD:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNPr), ResultReg);
    break;
  case CmpInst::FCMP_UNO:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETPr), ResultReg);
    break;
  case CmpInst::FCMP_UEQ:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
    break;
  case CmpInst::FCMP_UGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::FCMP_UGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::FCMP_ULT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::FCMP_ULE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::ICMP_EQ:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
    break;
  case CmpInst::ICMP_NE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
    break;
  case CmpInst::ICMP_UGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::ICMP_UGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::ICMP_ULT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::ICMP_ULE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::ICMP_SGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETGr), ResultReg);
    break;
  case CmpInst::ICMP_SGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETGEr), ResultReg);
    break;
  case CmpInst::ICMP_SLT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETLr), ResultReg);
    break;
  case CmpInst::ICMP_SLE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETLEr), ResultReg);
    break;
  default:
    return false;
  }

  UpdateValueMap(I, ResultReg);
  return true;
}

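/// X86SelectZExt - Select and emit code to implement zext instructions.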
bool X86FastISel::X86SelectZExt(Instruction *I) {
  // Special-case hack: The only i1 values we know how to produce currently
  // set the upper bits of an i8 value to zero.
  if (I->getType() == Type::Int8Ty &&
      I->getOperand(0)->getType() == Type::Int1Ty) {
    unsigned ResultReg = getRegForValue(I->getOperand(0));
    if (ResultReg == 0) return false;
    UpdateValueMap(I, ResultReg);
    return true;
  }

  return false;
}

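/// X86SelectBranch - Select and emit code to implement conditional branch
/// instructions.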
bool X86FastISel::X86SelectBranch(Instruction *I) {
  BranchInst *BI = cast<BranchInst>(I);
  // Unconditional branches are selected by tablegen-generated code.
  unsigned OpReg = getRegForValue(BI->getCondition());
  if (OpReg == 0) return false;
  MachineBasicBlock *TrueMBB = MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FalseMBB = MBBMap[BI->getSuccessor(1)];

  // Test the condition byte, jump to the true block if it is non-zero, and
  // otherwise jump unconditionally to the false block.
  BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
  BuildMI(MBB, TII.get(X86::JNE)).addMBB(TrueMBB);
  BuildMI(MBB, TII.get(X86::JMP)).addMBB(FalseMBB);

  MBB->addSuccessor(TrueMBB);
  MBB->addSuccessor(FalseMBB);

  return true;
}

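/// TargetSelectInstruction - Try to fast-select the given instruction,
/// returning true on success and false if it is not handled here.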
bool
X86FastISel::TargetSelectInstruction(Instruction *I) {
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    return X86SelectLoad(I);
  case Instruction::Store:
    return X86SelectStore(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return X86SelectCmp(I);
  case Instruction::ZExt:
    return X86SelectZExt(I);
  case Instruction::Br:
    return X86SelectBranch(I);
  }

  return false;
}

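/// TargetSelectConstantPoolLoad - Select and emit code to materialize the
/// given constant in a register, loading it from the constant pool if
/// necessary. Returns the result register, or 0 if the constant could not be
/// handled.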
unsigned X86FastISel::TargetSelectConstantPoolLoad(Constant *C,
                                                   MachineConstantPool* MCP) {
  // Reuse the register if this constant has already been materialized.
  unsigned CPLoad = getRegForValue(C);
  if (CPLoad != 0)
    return CPLoad;

  // Can't handle PIC-mode yet.
  if (TM.getRelocationModel() == Reloc::PIC_)
    return 0;

  MVT VT = MVT::getMVT(C->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return 0;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT))
    return 0;

  // Get the opcode and register class for loading a constant of this type.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return 0;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::LD_Fp80m;
    RC = X86::RFP80RegisterClass;
    break;
  }

  unsigned ResultReg = createResultReg(RC);
  if (isa<GlobalValue>(C)) {
    // Global addresses are materialized with X86SelectConstAddr rather than
    // through the constant pool.
    if (X86SelectConstAddr(C, ResultReg))
      return ResultReg;
    return 0;
  }

  unsigned MCPOffset = MCP->getConstantPoolIndex(C, 0);
  addConstantPoolReference(BuildMI(MBB, TII.get(Opc), ResultReg), MCPOffset);
  UpdateValueMap(C, ResultReg);
  return ResultReg;
}

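/// createFastISel - Create an instance of the X86-specific FastISel
/// implementation for the given machine function and value maps.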
namespace llvm {
  llvm::FastISel *X86::createFastISel(MachineFunction &mf,
                                      DenseMap<const Value *, unsigned> &vm,
                                      DenseMap<const BasicBlock *, MachineBasicBlock *> &bm) {
    return new X86FastISel(mf, vm, bm);
  }
}