//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the X86-specific support for the FastISel class. Much
// of the target-specific code is generated by tablegen in the file
// X86GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Instructions.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

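/// X86FastISel - X86-specific subclass of FastISel that implements the
/// target hooks for fast instruction selection.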
class X86FastISel : public FastISel {
  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

public:
  explicit X86FastISel(MachineFunction &mf,
                       DenseMap<const Value *, unsigned> &vm,
                       DenseMap<const BasicBlock *, MachineBasicBlock *> &bm)
    : FastISel(mf, vm, bm) {
    Subtarget = &TM.getSubtarget<X86Subtarget>();
  }

  virtual bool TargetSelectInstruction(Instruction *I);

#include "X86GenFastISel.inc"

private:
  bool X86SelectConstAddr(Value *V, unsigned &Op0);

  bool X86SelectLoad(Instruction *I);

  bool X86SelectStore(Instruction *I);

  bool X86SelectCmp(Instruction *I);

  bool X86SelectZExt(Instruction *I);

  bool X86SelectBranch(Instruction *I);

  unsigned TargetSelectConstantPoolLoad(Constant *C, MachineConstantPool* MCP);
};

/// X86SelectConstAddr - Select and emit code to materialize a constant address.
///
bool X86FastISel::X86SelectConstAddr(Value *V,
                                     unsigned &Op0) {
  // FIXME: Only GlobalAddress for now.
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  if (!GV)
    return false;

  if (Subtarget->GVRequiresExtraLoad(GV, TM, false)) {
    // Issue load from stub if necessary.
    unsigned Opc = 0;
    const TargetRegisterClass *RC = NULL;
    if (TLI.getPointerTy() == MVT::i32) {
      Opc = X86::MOV32rm;
      RC  = X86::GR32RegisterClass;
    } else {
      Opc = X86::MOV64rm;
      RC  = X86::GR64RegisterClass;
    }
    Op0 = createResultReg(RC);
    X86AddressMode AM;
    AM.GV = GV;
    addFullAddress(BuildMI(MBB, TII.get(Opc), Op0), AM);
    // Prevent loading the GV stub multiple times in the same MBB.
    LocalValueMap[V] = Op0;
  }
  return true;
}

/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(Instruction *I) {
  MVT VT = MVT::getMVT(I->getOperand(0)->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT))
    return false;
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  Value *V = I->getOperand(1);
  unsigned Op1 = getRegForValue(V);
  if (Op1 == 0) {
    // Handle constant store address.
    if (!isa<Constant>(V) || !X86SelectConstAddr(V, Op1))
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // Get opcode and regclass for the given store instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8mr;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16mr;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32mr;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64mr;
    RC  = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSmr;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::ST_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDmr;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::ST_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::ST_FP80m;
    RC  = X86::RFP80RegisterClass;
    break;
  }

  X86AddressMode AM;
  if (Op1)
    // Address is in register.
    AM.Base.Reg = Op1;
  else
    AM.GV = cast<GlobalValue>(V);
  addFullAddress(BuildMI(MBB, TII.get(Opc)), AM).addReg(Op0);
  return true;
}

/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(Instruction *I) {
  MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT))
    return false;

  Value *V = I->getOperand(0);
  unsigned Op0 = getRegForValue(V);
  if (Op0 == 0) {
    // Handle constant load address.
    if (!isa<Constant>(V) || !X86SelectConstAddr(V, Op0))
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC  = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::LD_Fp80m;
    RC  = X86::RFP80RegisterClass;
    break;
  }

  unsigned ResultReg = createResultReg(RC);
  X86AddressMode AM;
  if (Op0)
    // Address is in register.
    AM.Base.Reg = Op0;
  else
    AM.GV = cast<GlobalValue>(V);
  addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
  UpdateValueMap(I, ResultReg);
  return true;
}

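/// X86SelectCmp - Select and emit code to implement icmp and fcmp
/// instructions, producing the i1 result in an i8 register via SETcc.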
bool X86FastISel::X86SelectCmp(Instruction *I) {
  CmpInst *CI = cast<CmpInst>(I);

  unsigned Op0Reg = getRegForValue(CI->getOperand(0));
  unsigned Op1Reg = getRegForValue(CI->getOperand(1));

  unsigned Opc;
  switch (TLI.getValueType(I->getOperand(0)->getType()).getSimpleVT()) {
  case MVT::i8: Opc = X86::CMP8rr; break;
  case MVT::i16: Opc = X86::CMP16rr; break;
  case MVT::i32: Opc = X86::CMP32rr; break;
  case MVT::i64: Opc = X86::CMP64rr; break;
  case MVT::f32: Opc = X86::UCOMISSrr; break;
  case MVT::f64: Opc = X86::UCOMISDrr; break;
  default: return false;
  }

  unsigned ResultReg = createResultReg(&X86::GR8RegClass);
  switch (CI->getPredicate()) {
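  // Note: for the FP predicates below, UCOMISS/UCOMISD set ZF, PF and CF, and
  // an unordered result sets PF. FCMP_OEQ therefore needs ZF && !PF (SETE
  // combined with SETNP) and FCMP_UNE needs !ZF || PF (SETNE combined with
  // SETP); every other predicate maps to a single SETcc.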
  case CmpInst::FCMP_OEQ: {
    unsigned EReg = createResultReg(&X86::GR8RegClass);
    unsigned NPReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), EReg);
    BuildMI(MBB, TII.get(X86::SETNPr), NPReg);
    BuildMI(MBB, TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
    break;
  }
  case CmpInst::FCMP_UNE: {
    unsigned NEReg = createResultReg(&X86::GR8RegClass);
    unsigned PReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), NEReg);
    BuildMI(MBB, TII.get(X86::SETPr), PReg);
    BuildMI(MBB, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
    break;
  }
  case CmpInst::FCMP_OGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::FCMP_OGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::FCMP_OLT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::FCMP_OLE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::FCMP_ONE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
    break;
  case CmpInst::FCMP_ORD:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNPr), ResultReg);
    break;
  case CmpInst::FCMP_UNO:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETPr), ResultReg);
    break;
  case CmpInst::FCMP_UEQ:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
    break;
  case CmpInst::FCMP_UGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::FCMP_UGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::FCMP_ULT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::FCMP_ULE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::ICMP_EQ:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
    break;
  case CmpInst::ICMP_NE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
    break;
  case CmpInst::ICMP_UGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::ICMP_UGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::ICMP_ULT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::ICMP_ULE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::ICMP_SGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETGr), ResultReg);
    break;
  case CmpInst::ICMP_SGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETGEr), ResultReg);
    break;
  case CmpInst::ICMP_SLT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETLr), ResultReg);
    break;
  case CmpInst::ICMP_SLE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETLEr), ResultReg);
    break;
  default:
    return false;
  }

  UpdateValueMap(I, ResultReg);
  return true;
}

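/// X86SelectZExt - Select and emit code to implement zext instructions.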
bool X86FastISel::X86SelectZExt(Instruction *I) {
  // Special-case hack: The only i1 values we know how to produce currently
  // set the upper bits of an i8 value to zero.
  if (I->getType() == Type::Int8Ty &&
      I->getOperand(0)->getType() == Type::Int1Ty) {
    unsigned ResultReg = getRegForValue(I->getOperand(0));
    UpdateValueMap(I, ResultReg);
    return true;
  }

  return false;
}

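/// X86SelectBranch - Select and emit code to implement conditional branch
/// instructions.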
bool X86FastISel::X86SelectBranch(Instruction *I) {
  BranchInst *BI = cast<BranchInst>(I);
  // Unconditional branches are selected by tablegen-generated code.
  unsigned OpReg = getRegForValue(BI->getCondition());
  MachineBasicBlock *TrueMBB = MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FalseMBB = MBBMap[BI->getSuccessor(1)];

  BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
  BuildMI(MBB, TII.get(X86::JNE)).addMBB(TrueMBB);
  BuildMI(MBB, TII.get(X86::JMP)).addMBB(FalseMBB);

  MBB->addSuccessor(TrueMBB);
  MBB->addSuccessor(FalseMBB);

  return true;
}

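/// TargetSelectInstruction - Handle instructions that need X86-specific
/// fast-path selection; returns false if the instruction is not handled here.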
bool
X86FastISel::TargetSelectInstruction(Instruction *I) {
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    return X86SelectLoad(I);
  case Instruction::Store:
    return X86SelectStore(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return X86SelectCmp(I);
  case Instruction::ZExt:
    return X86SelectZExt(I);
  case Instruction::Br:
    return X86SelectBranch(I);
  }

  return false;
}

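/// TargetSelectConstantPoolLoad - Materialize the given constant into a
/// register, loading it from the constant pool (or, for global addresses,
/// through X86SelectConstAddr). Returns the result register, or 0 on failure.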
unsigned X86FastISel::TargetSelectConstantPoolLoad(Constant *C,
                                                   MachineConstantPool* MCP) {
  unsigned CPLoad = getRegForValue(C);
  if (CPLoad != 0)
    return CPLoad;

  // Can't handle PIC-mode yet.
  if (TM.getRelocationModel() == Reloc::PIC_)
    return 0;

  MVT VT = MVT::getMVT(C->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return 0;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT))
    return 0;

  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return 0;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC  = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::LD_Fp80m;
    RC  = X86::RFP80RegisterClass;
    break;
  }

  unsigned ResultReg = createResultReg(RC);
  if (isa<GlobalValue>(C)) {
    if (X86SelectConstAddr(C, ResultReg))
      return ResultReg;
    else
      return 0;
  }

  unsigned MCPOffset = MCP->getConstantPoolIndex(C, 0);
  addConstantPoolReference(BuildMI(MBB, TII.get(Opc), ResultReg), MCPOffset);
  UpdateValueMap(C, ResultReg);
  return ResultReg;
}

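// X86::createFastISel - Factory function that constructs the X86FastISel
// instance used for fast instruction selection.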
namespace llvm {
  llvm::FastISel *X86::createFastISel(MachineFunction &mf,
                                      DenseMap<const Value *, unsigned> &vm,
                                      DenseMap<const BasicBlock *, MachineBasicBlock *> &bm) {
    return new X86FastISel(mf, vm, bm);
  }
}