//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the X86-specific support for the FastISel class. Much
// of the target-specific code is generated by tablegen in the file
// X86GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Instructions.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

class X86FastISel : public FastISel {
  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

public:
  explicit X86FastISel(MachineFunction &mf,
                       DenseMap<const Value *, unsigned> &vm,
                       DenseMap<const BasicBlock *, MachineBasicBlock *> &bm)
    : FastISel(mf, vm, bm) {
    Subtarget = &TM.getSubtarget<X86Subtarget>();
  }

  virtual bool TargetSelectInstruction(Instruction *I);

#include "X86GenFastISel.inc"

private:
  bool X86FastEmitLoad(MVT VT, unsigned Op0, Value *V, unsigned &RR);

  bool X86FastEmitStore(MVT VT, unsigned Op0, unsigned Op1, Value *V);

  bool X86SelectConstAddr(Value *V, unsigned &Op0);

  bool X86SelectLoad(Instruction *I);

  bool X86SelectStore(Instruction *I);

  bool X86SelectCmp(Instruction *I);

  bool X86SelectZExt(Instruction *I);

  bool X86SelectBranch(Instruction *I);

  bool X86SelectShift(Instruction *I);

  bool X86SelectSelect(Instruction *I);

  unsigned TargetMaterializeConstant(Constant *C, MachineConstantPool* MCP);
};

/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address is either pre-computed, i.e. Op0, or a GlobalAddress, i.e. V.
/// Return true and the result register by reference if it is possible.
bool X86FastISel::X86FastEmitLoad(MVT VT, unsigned Op0, Value *V,
                                  unsigned &ResultReg) {
  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::LD_Fp80m;
    RC = X86::RFP80RegisterClass;
    break;
  }

  ResultReg = createResultReg(RC);
  X86AddressMode AM;
  if (Op0)
    // Address is in register.
    AM.Base.Reg = Op0;
  else
    AM.GV = cast<GlobalValue>(V);
  addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
  return true;
}

/// X86FastEmitStore - Emit a machine instruction to store a value Op0 of
/// type VT. The address is either pre-computed, i.e. Op1, or a GlobalAddress,
/// i.e. V. Return true if it is possible.
bool
X86FastISel::X86FastEmitStore(MVT VT, unsigned Op0, unsigned Op1, Value *V) {
  // Get opcode and regclass of the value for the given store instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8mr;
    RC = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16mr;
    RC = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32mr;
    RC = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64mr;
    RC = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSmr;
      RC = X86::FR32RegisterClass;
    } else {
      Opc = X86::ST_Fp32m;
      RC = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDmr;
      RC = X86::FR64RegisterClass;
    } else {
      Opc = X86::ST_Fp64m;
      RC = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::ST_FP80m;
    RC = X86::RFP80RegisterClass;
    break;
  }

  X86AddressMode AM;
  if (Op1)
    // Address is in register.
    AM.Base.Reg = Op1;
  else
    AM.GV = cast<GlobalValue>(V);
  addFullAddress(BuildMI(MBB, TII.get(Opc)), AM).addReg(Op0);
  return true;
}

/// X86SelectConstAddr - Select and emit code to materialize a constant
/// address.
///
bool X86FastISel::X86SelectConstAddr(Value *V,
                                     unsigned &Op0) {
  // FIXME: Only GlobalAddress for now.
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  if (!GV)
    return false;

  if (Subtarget->GVRequiresExtraLoad(GV, TM, false)) {
    // Issue load from stub if necessary.
    unsigned Opc = 0;
    const TargetRegisterClass *RC = NULL;
    if (TLI.getPointerTy() == MVT::i32) {
      Opc = X86::MOV32rm;
      RC = X86::GR32RegisterClass;
    } else {
      Opc = X86::MOV64rm;
      RC = X86::GR64RegisterClass;
    }
    Op0 = createResultReg(RC);
    X86AddressMode AM;
    AM.GV = GV;
    addFullAddress(BuildMI(MBB, TII.get(Opc), Op0), AM);
    // Prevent loading the GV stub multiple times in the same MBB.
    LocalValueMap[V] = Op0;
  }
  return true;
}

/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(Instruction *I) {
  MVT VT = MVT::getMVT(I->getOperand(0)->getType());
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT))
    return false;
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  Value *V = I->getOperand(1);
  unsigned Op1 = getRegForValue(V);
  if (Op1 == 0) {
    // Handle constant store address.
    if (!isa<Constant>(V) || !X86SelectConstAddr(V, Op1))
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  return X86FastEmitStore(VT, Op0, Op1, V);
}

/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(Instruction *I) {
  MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT))
    return false;

  Value *V = I->getOperand(0);
  unsigned Op0 = getRegForValue(V);
  if (Op0 == 0) {
    // Handle constant load address.
    // FIXME: If load type is something we can't handle, this can result in
    // a dead stub load instruction.
    if (!isa<Constant>(V) || !X86SelectConstAddr(V, Op0))
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  unsigned ResultReg = 0;
  if (X86FastEmitLoad(VT, Op0, V, ResultReg)) {
    UpdateValueMap(I, ResultReg);
    return true;
  }
  return false;
}

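/// X86SelectCmp - Select and emit code to implement icmp and fcmp
/// instructions. The comparison is lowered to a CMP or UCOMISS/UCOMISD
/// followed by the appropriate SETcc, producing the boolean result in an
/// 8-bit register.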
bool X86FastISel::X86SelectCmp(Instruction *I) {
  CmpInst *CI = cast<CmpInst>(I);

  MVT VT = TLI.getValueType(I->getOperand(0)->getType());
  if (!TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(CI->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(CI->getOperand(1));
  if (Op1Reg == 0) return false;

  unsigned Opc;
  switch (VT.getSimpleVT()) {
  case MVT::i8:  Opc = X86::CMP8rr;  break;
  case MVT::i16: Opc = X86::CMP16rr; break;
  case MVT::i32: Opc = X86::CMP32rr; break;
  case MVT::i64: Opc = X86::CMP64rr; break;
  case MVT::f32: Opc = X86::UCOMISSrr; break;
  case MVT::f64: Opc = X86::UCOMISDrr; break;
  default: return false;
  }

  unsigned ResultReg = createResultReg(&X86::GR8RegClass);
  switch (CI->getPredicate()) {
  case CmpInst::FCMP_OEQ: {
    unsigned EReg = createResultReg(&X86::GR8RegClass);
    unsigned NPReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), EReg);
    BuildMI(MBB, TII.get(X86::SETNPr), NPReg);
    BuildMI(MBB, TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
    break;
  }
  case CmpInst::FCMP_UNE: {
    unsigned NEReg = createResultReg(&X86::GR8RegClass);
    unsigned PReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), NEReg);
    BuildMI(MBB, TII.get(X86::SETPr), PReg);
    BuildMI(MBB, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
    break;
  }
  case CmpInst::FCMP_OGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::FCMP_OGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::FCMP_OLT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::FCMP_OLE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::FCMP_ONE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
    break;
  case CmpInst::FCMP_ORD:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNPr), ResultReg);
    break;
  case CmpInst::FCMP_UNO:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETPr), ResultReg);
    break;
  case CmpInst::FCMP_UEQ:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
    break;
  case CmpInst::FCMP_UGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::FCMP_UGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::FCMP_ULT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::FCMP_ULE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::ICMP_EQ:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
    break;
  case CmpInst::ICMP_NE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
    break;
  case CmpInst::ICMP_UGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::ICMP_UGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::ICMP_ULT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::ICMP_ULE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::ICMP_SGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETGr), ResultReg);
    break;
  case CmpInst::ICMP_SGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETGEr), ResultReg);
    break;
  case CmpInst::ICMP_SLT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETLr), ResultReg);
    break;
  case CmpInst::ICMP_SLE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETLEr), ResultReg);
    break;
  default:
    return false;
  }

  UpdateValueMap(I, ResultReg);
  return true;
}

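/// X86SelectZExt - Select and emit code to implement zext instructions.
/// Currently only the i1-to-i8 case is handled: the i1 producers we support
/// already leave the upper bits of the i8 register zeroed, so the source
/// register can simply be reused.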
bool X86FastISel::X86SelectZExt(Instruction *I) {
  // Special-case hack: The only i1 values we know how to produce currently
  // set the upper bits of an i8 value to zero.
  if (I->getType() == Type::Int8Ty &&
      I->getOperand(0)->getType() == Type::Int1Ty) {
    unsigned ResultReg = getRegForValue(I->getOperand(0));
    if (ResultReg == 0) return false;
    UpdateValueMap(I, ResultReg);
    return true;
  }

  return false;
}

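/// X86SelectBranch - Select and emit code to implement conditional branch
/// instructions: the condition register is tested and a JNE/JMP pair is
/// emitted. Unconditional branches are handled by tablegen-generated code.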
bool X86FastISel::X86SelectBranch(Instruction *I) {
  BranchInst *BI = cast<BranchInst>(I);
  // Unconditional branches are selected by tablegen-generated code.
  unsigned OpReg = getRegForValue(BI->getCondition());
  if (OpReg == 0) return false;
  MachineBasicBlock *TrueMBB = MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FalseMBB = MBBMap[BI->getSuccessor(1)];

  BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
  BuildMI(MBB, TII.get(X86::JNE)).addMBB(TrueMBB);
  BuildMI(MBB, TII.get(X86::JMP)).addMBB(FalseMBB);

  MBB->addSuccessor(TrueMBB);
  MBB->addSuccessor(FalseMBB);

  return true;
}

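/// X86SelectShift - Select and emit code to implement shl, lshr, and ashr
/// instructions. The shift amount is copied into CL (or CX/ECX/RCX) and the
/// corresponding shift-by-CL opcode is emitted.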
bool X86FastISel::X86SelectShift(Instruction *I) {
  unsigned CReg = 0;
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  if (I->getType() == Type::Int8Ty) {
    CReg = X86::CL;
    RC = &X86::GR8RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR8rCL; break;
    case Instruction::AShr: Opc = X86::SAR8rCL; break;
    case Instruction::Shl:  Opc = X86::SHL8rCL; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int16Ty) {
    CReg = X86::CX;
    RC = &X86::GR16RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR16rCL; break;
    case Instruction::AShr: Opc = X86::SAR16rCL; break;
    case Instruction::Shl:  Opc = X86::SHL16rCL; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int32Ty) {
    CReg = X86::ECX;
    RC = &X86::GR32RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR32rCL; break;
    case Instruction::AShr: Opc = X86::SAR32rCL; break;
    case Instruction::Shl:  Opc = X86::SHL32rCL; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int64Ty) {
    CReg = X86::RCX;
    RC = &X86::GR64RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR64rCL; break;
    case Instruction::AShr: Opc = X86::SAR64rCL; break;
    case Instruction::Shl:  Opc = X86::SHL64rCL; break;
    default: return false;
    }
  } else {
    return false;
  }

  MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC);
  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, TII.get(Opc), ResultReg).addReg(Op0Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}

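/// X86SelectSelect - Select and emit code to implement select instructions
/// on integer and pointer operands, using a TEST of the condition followed
/// by a CMOVE.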
bool X86FastISel::X86SelectSelect(Instruction *I) {
  const Type *Ty = I->getType();
  if (isa<PointerType>(Ty))
    Ty = TLI.getTargetData()->getIntPtrType();

  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  if (Ty == Type::Int16Ty) {
    Opc = X86::CMOVE16rr;
    RC = &X86::GR16RegClass;
  } else if (Ty == Type::Int32Ty) {
    Opc = X86::CMOVE32rr;
    RC = &X86::GR32RegClass;
  } else if (Ty == Type::Int64Ty) {
    Opc = X86::CMOVE64rr;
    RC = &X86::GR64RegClass;
  } else {
    return false;
  }

  MVT VT = MVT::getMVT(Ty, /*HandleUnknown=*/true);
  if (VT == MVT::Other || !TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}

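/// TargetSelectInstruction - Dispatch the given instruction to the
/// X86-specific selection routines above. Returns false if the instruction
/// is not handled here, so the caller can fall back to another selection
/// path.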
bool
X86FastISel::TargetSelectInstruction(Instruction *I) {
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    return X86SelectLoad(I);
  case Instruction::Store:
    return X86SelectStore(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return X86SelectCmp(I);
  case Instruction::ZExt:
    return X86SelectZExt(I);
  case Instruction::Br:
    return X86SelectBranch(I);
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::Shl:
    return X86SelectShift(I);
  case Instruction::Select:
    return X86SelectSelect(I);
  }

  return false;
}

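/// TargetMaterializeConstant - Emit code to materialize the constant C into
/// a register: reuse an existing register if one is already mapped, load a
/// global's address (possibly through a stub), or load from the constant
/// pool. Returns the result register, or 0 if the constant is not handled.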
unsigned X86FastISel::TargetMaterializeConstant(Constant *C,
                                                MachineConstantPool* MCP) {
  unsigned CPLoad = getRegForValue(C);
  if (CPLoad != 0)
    return CPLoad;

  // Can't handle PIC-mode yet.
  if (TM.getRelocationModel() == Reloc::PIC_)
    return 0;

  MVT VT = MVT::getMVT(C->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return 0;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT))
    return 0;

  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return 0;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::LD_Fp80m;
    RC = X86::RFP80RegisterClass;
    break;
  }

  unsigned ResultReg = createResultReg(RC);
  if (isa<GlobalValue>(C)) {
    // FIXME: If the constant's type is something we can't handle, this can
    // result in a dead stub load instruction.
    if (X86SelectConstAddr(C, ResultReg))
      return ResultReg;
    return 0;
  }

  unsigned MCPOffset = MCP->getConstantPoolIndex(C, 0);
  addConstantPoolReference(BuildMI(MBB, TII.get(Opc), ResultReg), MCPOffset);
  UpdateValueMap(C, ResultReg);
  return ResultReg;
}

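/// X86::createFastISel - Factory function that constructs the X86 FastISel
/// instance for the given MachineFunction and value/block maps.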
namespace llvm {
  llvm::FastISel *X86::createFastISel(MachineFunction &mf,
                        DenseMap<const Value *, unsigned> &vm,
                        DenseMap<const BasicBlock *, MachineBasicBlock *> &bm) {
    return new X86FastISel(mf, vm, bm);
  }
}