//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the X86-specific support for the FastISel class. Much
// of the target-specific code is generated by tablegen in the file
// X86GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Instructions.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

class X86FastISel : public FastISel {
  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

public:
  explicit X86FastISel(MachineFunction &mf,
                       DenseMap<const Value *, unsigned> &vm,
                       DenseMap<const BasicBlock *, MachineBasicBlock *> &bm)
    : FastISel(mf, vm, bm) {
    Subtarget = &TM.getSubtarget<X86Subtarget>();
  }

  virtual bool TargetSelectInstruction(Instruction *I);

#include "X86GenFastISel.inc"

private:
  bool X86FastEmitLoad(MVT VT, unsigned Op0, Value *V, unsigned &RR);

  bool X86FastEmitStore(MVT VT, unsigned Op0, unsigned Op1, Value *V);

  bool X86SelectConstAddr(Value *V, unsigned &Op0);

  bool X86SelectLoad(Instruction *I);

  bool X86SelectStore(Instruction *I);

  bool X86SelectCmp(Instruction *I);

  bool X86SelectZExt(Instruction *I);

  bool X86SelectBranch(Instruction *I);

  bool X86SelectShift(Instruction *I);

  bool X86SelectSelect(Instruction *I);

  bool X86SelectTrunc(Instruction *I);

  unsigned TargetMaterializeConstant(Constant *C, MachineConstantPool* MCP);
};

/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address is either pre-computed, i.e. Op0, or a GlobalAddress, i.e. V.
/// Return true and the result register by reference if it is possible.
bool X86FastISel::X86FastEmitLoad(MVT VT, unsigned Op0, Value *V,
                                  unsigned &ResultReg) {
  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC  = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::LD_Fp80m;
    RC  = X86::RFP80RegisterClass;
    break;
  }

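  // Materialize the address: either use the pre-computed register as the base
  // or fold the GlobalValue directly into the memory operand.  addFullAddress
  // appends the x86 memory reference described by the X86AddressMode (base,
  // scale, index and displacement operands) to the instruction being built.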
  ResultReg = createResultReg(RC);
  X86AddressMode AM;
  if (Op0)
    // Address is in register.
    AM.Base.Reg = Op0;
  else
    AM.GV = cast<GlobalValue>(V);
  addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
  return true;
}

/// X86FastEmitStore - Emit a machine instruction to store a value Op0 of
/// type VT. The address is either pre-computed, i.e. Op1, or a GlobalAddress,
/// i.e. V. Return true if it is possible.
bool
X86FastISel::X86FastEmitStore(MVT VT, unsigned Op0, unsigned Op1, Value *V) {
  // Get opcode and regclass of the value to be stored.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8mr;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16mr;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32mr;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64mr;
    RC  = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSmr;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::ST_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDmr;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::ST_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::ST_FP80m;
    RC  = X86::RFP80RegisterClass;
    break;
  }

  X86AddressMode AM;
  if (Op1)
    // Address is in register.
    AM.Base.Reg = Op1;
  else
    AM.GV = cast<GlobalValue>(V);
  addFullAddress(BuildMI(MBB, TII.get(Opc)), AM).addReg(Op0);
  return true;
}

/// X86SelectConstAddr - Select and emit code to materialize constant address.
///
bool X86FastISel::X86SelectConstAddr(Value *V,
                                     unsigned &Op0) {
  // FIXME: Only GlobalAddress for now.
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  if (!GV)
    return false;

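  // Some globals (e.g. symbols defined outside the current linkage unit under
  // Darwin PIC or dynamic-no-pic) cannot be referenced by address directly;
  // their address must first be loaded from an indirection stub.
  // GVRequiresExtraLoad reports whether that extra load is needed here.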
  if (Subtarget->GVRequiresExtraLoad(GV, TM, false)) {
    // Issue load from stub if necessary.
    unsigned Opc = 0;
    const TargetRegisterClass *RC = NULL;
    if (TLI.getPointerTy() == MVT::i32) {
      Opc = X86::MOV32rm;
      RC  = X86::GR32RegisterClass;
    } else {
      Opc = X86::MOV64rm;
      RC  = X86::GR64RegisterClass;
    }
    Op0 = createResultReg(RC);
    X86AddressMode AM;
    AM.GV = GV;
    addFullAddress(BuildMI(MBB, TII.get(Opc), Op0), AM);
    // Prevent loading GV stub multiple times in same MBB.
    LocalValueMap[V] = Op0;
  }
  return true;
}

/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(Instruction* I) {
  MVT VT = MVT::getMVT(I->getOperand(0)->getType());
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type.  Halt "fast" selection and bail.
    return false;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT))
    return false;
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  Value *V = I->getOperand(1);
  unsigned Op1 = getRegForValue(V);
  if (Op1 == 0) {
    // Handle constant store address.
    if (!isa<Constant>(V) || !X86SelectConstAddr(V, Op1))
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  return X86FastEmitStore(VT, Op0, Op1, V);
}

/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(Instruction *I) {
  MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT))
    return false;

  Value *V = I->getOperand(0);
  unsigned Op0 = getRegForValue(V);
  if (Op0 == 0) {
    // Handle constant load address.
    // FIXME: If load type is something we can't handle, this can result in
    // a dead stub load instruction.
    if (!isa<Constant>(V) || !X86SelectConstAddr(V, Op0))
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  unsigned ResultReg = 0;
  if (X86FastEmitLoad(VT, Op0, V, ResultReg)) {
    UpdateValueMap(I, ResultReg);
    return true;
  }
  return false;
}

bool X86FastISel::X86SelectCmp(Instruction *I) {
  CmpInst *CI = cast<CmpInst>(I);

  MVT VT = TLI.getValueType(I->getOperand(0)->getType());
  if (!TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(CI->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(CI->getOperand(1));
  if (Op1Reg == 0) return false;

  unsigned Opc;
  switch (VT.getSimpleVT()) {
  case MVT::i8: Opc = X86::CMP8rr; break;
  case MVT::i16: Opc = X86::CMP16rr; break;
  case MVT::i32: Opc = X86::CMP32rr; break;
  case MVT::i64: Opc = X86::CMP64rr; break;
  case MVT::f32: Opc = X86::UCOMISSrr; break;
  case MVT::f64: Opc = X86::UCOMISDrr; break;
  default: return false;
  }

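  // The i1 result is materialized as a byte via SETcc into a GR8 register.
  // For floating-point compares, UCOMISS/UCOMISD set ZF, PF and CF, and an
  // unordered result (a NaN operand) sets all three.  Ordered-equal therefore
  // needs ZF=1 and PF=0 (SETE && SETNP), and unordered-not-equal needs ZF=0
  // or PF=1 (SETNE || SETP); the remaining predicates map to a single SETcc,
  // swapping the compare operands where the condition is only expressible in
  // one direction.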
  unsigned ResultReg = createResultReg(&X86::GR8RegClass);
  switch (CI->getPredicate()) {
  case CmpInst::FCMP_OEQ: {
    unsigned EReg = createResultReg(&X86::GR8RegClass);
    unsigned NPReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), EReg);
    BuildMI(MBB, TII.get(X86::SETNPr), NPReg);
    BuildMI(MBB, TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
    break;
  }
  case CmpInst::FCMP_UNE: {
    unsigned NEReg = createResultReg(&X86::GR8RegClass);
    unsigned PReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), NEReg);
    BuildMI(MBB, TII.get(X86::SETPr), PReg);
    BuildMI(MBB, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
    break;
  }
  case CmpInst::FCMP_OGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::FCMP_OGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::FCMP_OLT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::FCMP_OLE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::FCMP_ONE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
    break;
  case CmpInst::FCMP_ORD:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNPr), ResultReg);
    break;
  case CmpInst::FCMP_UNO:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETPr), ResultReg);
    break;
  case CmpInst::FCMP_UEQ:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
    break;
  case CmpInst::FCMP_UGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::FCMP_UGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::FCMP_ULT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::FCMP_ULE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::ICMP_EQ:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
    break;
  case CmpInst::ICMP_NE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
    break;
  case CmpInst::ICMP_UGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::ICMP_UGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::ICMP_ULT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::ICMP_ULE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::ICMP_SGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETGr), ResultReg);
    break;
  case CmpInst::ICMP_SGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETGEr), ResultReg);
    break;
  case CmpInst::ICMP_SLT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETLr), ResultReg);
    break;
  case CmpInst::ICMP_SLE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETLEr), ResultReg);
    break;
  default:
    return false;
  }

  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectZExt(Instruction *I) {
  // Special-case hack: The only i1 values we know how to produce currently
  // set the upper bits of an i8 value to zero.
  if (I->getType() == Type::Int8Ty &&
      I->getOperand(0)->getType() == Type::Int1Ty) {
    unsigned ResultReg = getRegForValue(I->getOperand(0));
    if (ResultReg == 0) return false;
    UpdateValueMap(I, ResultReg);
    return true;
  }

  return false;
}

bool X86FastISel::X86SelectBranch(Instruction *I) {
  BranchInst *BI = cast<BranchInst>(I);
  // Unconditional branches are selected by tablegen-generated code.
  unsigned OpReg = getRegForValue(BI->getCondition());
  if (OpReg == 0) return false;
  MachineBasicBlock *TrueMBB = MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FalseMBB = MBBMap[BI->getSuccessor(1)];

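  // The i1 condition lives in a byte register.  TEST sets ZF from it, JNE
  // (taken when ZF=0, i.e. the condition is non-zero) branches to the true
  // successor, and an explicit JMP covers the false successor rather than
  // relying on fallthrough.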
  BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
  BuildMI(MBB, TII.get(X86::JNE)).addMBB(TrueMBB);
  BuildMI(MBB, TII.get(X86::JMP)).addMBB(FalseMBB);

  MBB->addSuccessor(TrueMBB);
  MBB->addSuccessor(FalseMBB);

  return true;
}

bool X86FastISel::X86SelectShift(Instruction *I) {
  unsigned CReg = 0;
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  if (I->getType() == Type::Int8Ty) {
    CReg = X86::CL;
    RC = &X86::GR8RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR8rCL; break;
    case Instruction::AShr: Opc = X86::SAR8rCL; break;
    case Instruction::Shl:  Opc = X86::SHL8rCL; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int16Ty) {
    CReg = X86::CX;
    RC = &X86::GR16RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR16rCL; break;
    case Instruction::AShr: Opc = X86::SAR16rCL; break;
    case Instruction::Shl:  Opc = X86::SHL16rCL; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int32Ty) {
    CReg = X86::ECX;
    RC = &X86::GR32RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR32rCL; break;
    case Instruction::AShr: Opc = X86::SAR32rCL; break;
    case Instruction::Shl:  Opc = X86::SHL32rCL; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int64Ty) {
    CReg = X86::RCX;
    RC = &X86::GR64RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR64rCL; break;
    case Instruction::AShr: Opc = X86::SAR64rCL; break;
    case Instruction::Shl:  Opc = X86::SHL64rCL; break;
    default: return false;
    }
  } else {
    return false;
  }

  MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
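  // x86 variable-count shifts take the shift amount implicitly in CL (the low
  // byte of CX/ECX/RCX), so copy the count into CReg before issuing the shift.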
  TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC);
  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, TII.get(Opc), ResultReg).addReg(Op0Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectSelect(Instruction *I) {
  const Type *Ty = I->getType();
  if (isa<PointerType>(Ty))
    Ty = TLI.getTargetData()->getIntPtrType();

  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  if (Ty == Type::Int16Ty) {
    Opc = X86::CMOVE16rr;
    RC = &X86::GR16RegClass;
  } else if (Ty == Type::Int32Ty) {
    Opc = X86::CMOVE32rr;
    RC = &X86::GR32RegClass;
  } else if (Ty == Type::Int64Ty) {
    Opc = X86::CMOVE64rr;
    RC = &X86::GR64RegClass;
  } else {
    return false;
  }

  MVT VT = MVT::getMVT(Ty, /*HandleUnknown=*/true);
  if (VT == MVT::Other || !TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

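  // TEST sets ZF when the i1 condition is zero.  CMOVE (move if ZF=1) starts
  // from the true value (the tied first operand) and overwrites it with the
  // false value only when the condition is zero, which implements the select.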
  BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectTrunc(Instruction *I) {
  if (Subtarget->is64Bit())
    // All other cases should be handled by the tblgen generated code.
    return false;
  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  MVT DstVT = TLI.getValueType(I->getType());
  if (DstVT != MVT::i8)
    // All other cases should be handled by the tblgen generated code.
    return false;
  if (SrcVT != MVT::i16 && SrcVT != MVT::i32)
    // All other cases should be handled by the tblgen generated code.
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand.  Halt "fast" selection and bail.
    return false;

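  // In 32-bit mode only EAX, ECX, EDX and EBX have an addressable low 8-bit
  // subregister.  GR16_/GR32_ are the register classes restricted to those
  // registers, so copying into them first makes the subsequent 8-bit
  // extract_subreg legal.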
  // First issue a copy to GR16_ or GR32_.
  unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16to16_ : X86::MOV32to32_;
  const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
    ? X86::GR16_RegisterClass : X86::GR32_RegisterClass;
  unsigned CopyReg = createResultReg(CopyRC);
  BuildMI(MBB, TII.get(CopyOpc), CopyReg).addReg(InputReg);

  // Then issue an extract_subreg.
  unsigned ResultReg = FastEmitInst_extractsubreg(CopyReg, 1); // x86_subreg_8bit
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
X86FastISel::TargetSelectInstruction(Instruction *I) {
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    return X86SelectLoad(I);
  case Instruction::Store:
    return X86SelectStore(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return X86SelectCmp(I);
  case Instruction::ZExt:
    return X86SelectZExt(I);
  case Instruction::Br:
    return X86SelectBranch(I);
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::Shl:
    return X86SelectShift(I);
  case Instruction::Select:
    return X86SelectSelect(I);
  case Instruction::Trunc:
    return X86SelectTrunc(I);
  }

  return false;
}

unsigned X86FastISel::TargetMaterializeConstant(Constant *C,
                                                MachineConstantPool* MCP) {
  // Can't handle PIC-mode yet.
  if (TM.getRelocationModel() == Reloc::PIC_)
    return 0;

  MVT VT = MVT::getMVT(C->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return 0;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT))
    return 0;

  // Get opcode and regclass of the load used to materialize the constant.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return 0;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC  = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::LD_Fp80m;
    RC  = X86::RFP80RegisterClass;
    break;
  }

  unsigned ResultReg = createResultReg(RC);
  if (isa<GlobalValue>(C)) {
    // FIXME: If the value type is something we can't handle, this can result
    // in a dead stub load instruction.
    if (X86SelectConstAddr(C, ResultReg))
      return ResultReg;
    return 0;
  }

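  // Anything else (in practice, mostly floating-point immediates) is spilled
  // to the constant pool and loaded from there with the opcode chosen above.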
  // MachineConstantPool wants an explicit alignment.
  unsigned Align =
    TM.getTargetData()->getPreferredTypeAlignmentShift(C->getType());
  if (Align == 0) {
    // Alignment of vector types.  FIXME!
    Align = TM.getTargetData()->getABITypeSize(C->getType());
    Align = Log2_64(Align);
  }

  unsigned MCPOffset = MCP->getConstantPoolIndex(C, Align);
  addConstantPoolReference(BuildMI(MBB, TII.get(Opc), ResultReg), MCPOffset);
  return ResultReg;
}

namespace llvm {
  llvm::FastISel *X86::createFastISel(MachineFunction &mf,
                        DenseMap<const Value *, unsigned> &vm,
                        DenseMap<const BasicBlock *, MachineBasicBlock *> &bm) {
    return new X86FastISel(mf, vm, bm);
  }
}