//===-- X86ISelPattern.cpp - A pattern matching inst selector for X86 -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a pattern matching instruction selector for X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86RegisterInfo.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/ADT/Statistic.h"
#include <set>
#include <algorithm>
using namespace llvm;

// FIXME: temporary.
#include "llvm/Support/CommandLine.h"
static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden,
                                  cl::desc("Enable fastcc on X86"));

namespace {
  // X86 Specific DAG Nodes
  namespace X86ISD {
    enum NodeType {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// FILD64m - This instruction implements SINT_TO_FP with a
      /// 64-bit source in memory and a FP reg result.  This corresponds to
      /// the X86::FILD64m instruction.  It has two inputs (token chain and
      /// address) and two outputs (FP value and token chain).
      FILD64m,

      /// CALL/TAILCALL - These operations represent an abstract X86 call
      /// instruction, which includes a bunch of information.  In particular
      /// the operands of these nodes are:
      ///
      ///     #0 - The incoming token chain
      ///     #1 - The callee
      ///     #2 - The number of arg bytes the caller pushes on the stack.
      ///     #3 - The number of arg bytes the callee pops off the stack.
      ///     #4 - The value to pass in AL/AX/EAX (optional)
      ///     #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      ///     #0 - The outgoing token chain
      ///     #1 - The first register result value (optional)
      ///     #2 - The second register result value (optional)
      ///
      /// The CALL vs TAILCALL distinction boils down to whether the callee is
      /// known not to modify the caller's stack frame, as is standard with
      /// LLVM.
      CALL,
      TAILCALL,
    };
  }
}

//===----------------------------------------------------------------------===//
//  X86TargetLowering - X86 Implementation of the TargetLowering interface
namespace {
  class X86TargetLowering : public TargetLowering {
    int VarArgsFrameIndex;            // FrameIndex for start of varargs area.
    int ReturnAddrIndex;              // FrameIndex for return slot.
    int BytesToPopOnReturn;           // Number of arg bytes ret should pop.
    int BytesCallerReserves;          // Number of arg bytes caller makes.
  public:
    X86TargetLowering(TargetMachine &TM) : TargetLowering(TM) {
      // Set up the TargetLowering object.

      // X86 is weird, it always uses i8 for shift amounts and setcc results.
      setShiftAmountType(MVT::i8);
      setSetCCResultType(MVT::i8);
      setSetCCResultContents(ZeroOrOneSetCCResult);
      setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0

      // Set up the register classes.
      // FIXME: Eliminate these two classes when legalize can handle promotions
      // well.
      addRegisterClass(MVT::i1, X86::R8RegisterClass);
      addRegisterClass(MVT::i8, X86::R8RegisterClass);
      addRegisterClass(MVT::i16, X86::R16RegisterClass);
      addRegisterClass(MVT::i32, X86::R32RegisterClass);

      setOperationAction(ISD::SINT_TO_FP       , MVT::i64  , Custom);
      setOperationAction(ISD::BRCONDTWOWAY     , MVT::Other, Expand);
      setOperationAction(ISD::MEMMOVE          , MVT::Other, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
      setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
      setOperationAction(ISD::SEXTLOAD         , MVT::i1   , Expand);
      setOperationAction(ISD::SREM             , MVT::f64  , Expand);
      setOperationAction(ISD::CTPOP            , MVT::i8   , Expand);
      setOperationAction(ISD::CTTZ             , MVT::i8   , Expand);
      setOperationAction(ISD::CTLZ             , MVT::i8   , Expand);
      setOperationAction(ISD::CTPOP            , MVT::i16  , Expand);
      setOperationAction(ISD::CTTZ             , MVT::i16  , Expand);
      setOperationAction(ISD::CTLZ             , MVT::i16  , Expand);
      setOperationAction(ISD::CTPOP            , MVT::i32  , Expand);
      setOperationAction(ISD::CTTZ             , MVT::i32  , Expand);
      setOperationAction(ISD::CTLZ             , MVT::i32  , Expand);

      setOperationAction(ISD::READIO           , MVT::i1   , Expand);
      setOperationAction(ISD::READIO           , MVT::i8   , Expand);
      setOperationAction(ISD::READIO           , MVT::i16  , Expand);
      setOperationAction(ISD::READIO           , MVT::i32  , Expand);
      setOperationAction(ISD::WRITEIO          , MVT::i1   , Expand);
      setOperationAction(ISD::WRITEIO          , MVT::i8   , Expand);
      setOperationAction(ISD::WRITEIO          , MVT::i16  , Expand);
      setOperationAction(ISD::WRITEIO          , MVT::i32  , Expand);

      // These should be promoted to a larger select which is supported.
      setOperationAction(ISD::SELECT           , MVT::i1   , Promote);
      setOperationAction(ISD::SELECT           , MVT::i8   , Promote);

      if (X86ScalarSSE) {
        // Set up the FP register classes.
        addRegisterClass(MVT::f32, X86::RXMMRegisterClass);
        addRegisterClass(MVT::f64, X86::RXMMRegisterClass);

        setOperationAction(ISD::EXTLOAD,  MVT::f32, Expand);
        setOperationAction(ISD::ZEXTLOAD, MVT::f32, Expand);

        // We don't support sin/cos/sqrt/fmod
        setOperationAction(ISD::FSIN , MVT::f64, Expand);
        setOperationAction(ISD::FCOS , MVT::f64, Expand);
        setOperationAction(ISD::FABS , MVT::f64, Expand);
        setOperationAction(ISD::FNEG , MVT::f64, Expand);
        setOperationAction(ISD::SREM , MVT::f64, Expand);
        setOperationAction(ISD::FSIN , MVT::f32, Expand);
        setOperationAction(ISD::FCOS , MVT::f32, Expand);
        setOperationAction(ISD::FABS , MVT::f32, Expand);
        setOperationAction(ISD::FNEG , MVT::f32, Expand);
        setOperationAction(ISD::SREM , MVT::f32, Expand);
      } else {
        // Set up the FP register classes.
        addRegisterClass(MVT::f64, X86::RFPRegisterClass);

        if (!UnsafeFPMath) {
          setOperationAction(ISD::FSIN , MVT::f64 , Expand);
          setOperationAction(ISD::FCOS , MVT::f64 , Expand);
        }

        addLegalFPImmediate(+0.0); // FLD0
        addLegalFPImmediate(+1.0); // FLD1
        addLegalFPImmediate(-0.0); // FLD0/FCHS
        addLegalFPImmediate(-1.0); // FLD1/FCHS
      }
      computeRegisterProperties();
    }

    // Return the number of bytes that a function should pop when it returns (in
    // addition to the space used by the return address).
    //
    unsigned getBytesToPopOnReturn() const { return BytesToPopOnReturn; }

    // Return the number of bytes that the caller reserves for arguments passed
    // to this function.
    unsigned getBytesCallerReserves() const { return BytesCallerReserves; }

    /// LowerOperation - Provide custom lowering hooks for some operations.
    ///
    virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);

    /// LowerArguments - This hook must be implemented to indicate how we should
    /// lower the arguments for the specified function, into the specified DAG.
    virtual std::vector<SDOperand>
    LowerArguments(Function &F, SelectionDAG &DAG);

    /// LowerCallTo - This hook lowers an abstract call to a function into an
    /// actual call.
    virtual std::pair<SDOperand, SDOperand>
    LowerCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg, unsigned CC,
                bool isTailCall, SDOperand Callee, ArgListTy &Args,
                SelectionDAG &DAG);

    virtual SDOperand LowerVAStart(SDOperand Chain, SDOperand VAListP,
                                   Value *VAListV, SelectionDAG &DAG);
    virtual std::pair<SDOperand,SDOperand>
    LowerVAArg(SDOperand Chain, SDOperand VAListP, Value *VAListV,
               const Type *ArgTy, SelectionDAG &DAG);

    virtual std::pair<SDOperand, SDOperand>
    LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, unsigned Depth,
                            SelectionDAG &DAG);

    SDOperand getReturnAddressFrameIndex(SelectionDAG &DAG);

  private:
    // C Calling Convention implementation.
    std::vector<SDOperand> LowerCCCArguments(Function &F, SelectionDAG &DAG);
    std::pair<SDOperand, SDOperand>
    LowerCCCCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg,
                   bool isTailCall,
                   SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);

    // Fast Calling Convention implementation.
    std::vector<SDOperand> LowerFastCCArguments(Function &F, SelectionDAG &DAG);
    std::pair<SDOperand, SDOperand>
    LowerFastCCCallTo(SDOperand Chain, const Type *RetTy, bool isTailCall,
                      SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);
  };
}

std::vector<SDOperand>
X86TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
  if (F.getCallingConv() == CallingConv::Fast && EnableFastCC)
    return LowerFastCCArguments(F, DAG);
  return LowerCCCArguments(F, DAG);
}

std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy,
                               bool isVarArg, unsigned CallingConv,
                               bool isTailCall,
                               SDOperand Callee, ArgListTy &Args,
                               SelectionDAG &DAG) {
  assert((!isVarArg || CallingConv == CallingConv::C) &&
         "Only C takes varargs!");
  if (CallingConv == CallingConv::Fast && EnableFastCC)
    return LowerFastCCCallTo(Chain, RetTy, isTailCall, Callee, Args, DAG);
  return LowerCCCCallTo(Chain, RetTy, isVarArg, isTailCall, Callee, Args, DAG);
}

//===----------------------------------------------------------------------===//
//                    C Calling Convention implementation
//===----------------------------------------------------------------------===//

std::vector<SDOperand>
X86TargetLowering::LowerCCCArguments(Function &F, SelectionDAG &DAG) {
  std::vector<SDOperand> ArgValues;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // Add DAG nodes to load the arguments...  On entry to a function on the X86,
  // the stack frame looks like this:
  //
  // [ESP] -- return address
  // [ESP + 4] -- first argument (leftmost lexically)
  // [ESP + 8] -- second argument, if first argument is four bytes in size
  //    ...
  //
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    MVT::ValueType ObjectVT = getValueType(I->getType());
    unsigned ArgIncrement = 4;
    unsigned ObjSize;
    switch (ObjectVT) {
    default: assert(0 && "Unhandled argument type!");
    case MVT::i1:
    case MVT::i8:  ObjSize = 1; break;
    case MVT::i16: ObjSize = 2; break;
    case MVT::i32: ObjSize = 4; break;
    case MVT::i64: ObjSize = ArgIncrement = 8; break;
    case MVT::f32: ObjSize = 4; break;
    case MVT::f64: ObjSize = ArgIncrement = 8; break;
    }
    // Create the frame index object for this incoming parameter...
    int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);

    // Create the SelectionDAG nodes corresponding to a load from this parameter
    SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);

    // Don't codegen dead arguments.  FIXME: remove this check when we can nuke
    // dead loads.
    SDOperand ArgValue;
    if (!I->use_empty())
      ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
                             DAG.getSrcValue(NULL));
    else {
      if (MVT::isInteger(ObjectVT))
        ArgValue = DAG.getConstant(0, ObjectVT);
      else
        ArgValue = DAG.getConstantFP(0, ObjectVT);
    }
    ArgValues.push_back(ArgValue);

    ArgOffset += ArgIncrement;   // Move on to the next argument...
  }

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (F.isVarArg())
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
  ReturnAddrIndex = 0;     // No return address slot generated yet.
  BytesToPopOnReturn = 0;  // Callee pops nothing.
  BytesCallerReserves = ArgOffset;

  // Finally, inform the code generator which regs we return values in.
  switch (getValueType(F.getReturnType())) {
  default: assert(0 && "Unknown type!");
  case MVT::isVoid: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    MF.addLiveOut(X86::EAX);
    break;
  case MVT::i64:
    MF.addLiveOut(X86::EAX);
    MF.addLiveOut(X86::EDX);
    break;
  case MVT::f32:
  case MVT::f64:
    MF.addLiveOut(X86::ST0);
    break;
  }
  return ArgValues;
}

std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerCCCCallTo(SDOperand Chain, const Type *RetTy,
                                  bool isVarArg, bool isTailCall,
                                  SDOperand Callee, ArgListTy &Args,
                                  SelectionDAG &DAG) {
  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;

  if (Args.empty()) {
    // Save zero bytes.
    Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
                        DAG.getConstant(0, getPointerTy()));
  } else {
    for (unsigned i = 0, e = Args.size(); i != e; ++i)
      switch (getValueType(Args[i].second)) {
      default: assert(0 && "Unknown value type!");
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
      case MVT::f32:
        NumBytes += 4;
        break;
      case MVT::i64:
      case MVT::f64:
        NumBytes += 8;
        break;
      }

    Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
                        DAG.getConstant(NumBytes, getPointerTy()));

    // Arguments go on the stack in reverse order, as specified by the ABI.
    unsigned ArgOffset = 0;
    SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32,
                                            DAG.getEntryNode());
    std::vector<SDOperand> Stores;

    for (unsigned i = 0, e = Args.size(); i != e; ++i) {
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);

      switch (getValueType(Args[i].second)) {
      default: assert(0 && "Unexpected ValueType for argument!");
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
        // Promote the integer to 32 bits.  If the input type is signed use a
        // sign extend, otherwise use a zero extend.
        if (Args[i].second->isSigned())
          Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
        else
          Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);

        // FALL THROUGH
      case MVT::i32:
      case MVT::f32:
        Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                     Args[i].first, PtrOff,
                                     DAG.getSrcValue(NULL)));
        ArgOffset += 4;
        break;
      case MVT::i64:
      case MVT::f64:
        Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                     Args[i].first, PtrOff,
                                     DAG.getSrcValue(NULL)));
        ArgOffset += 8;
        break;
      }
    }
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
  }

  std::vector<MVT::ValueType> RetVals;
  MVT::ValueType RetTyVT = getValueType(RetTy);
  RetVals.push_back(MVT::Other);

  // The result values produced have to be legal.  Promote the result.
  switch (RetTyVT) {
  case MVT::isVoid: break;
  default:
    RetVals.push_back(RetTyVT);
    break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    RetVals.push_back(MVT::i32);
    break;
  case MVT::f32:
    if (X86ScalarSSE)
      RetVals.push_back(MVT::f32);
    else
      RetVals.push_back(MVT::f64);
    break;
  case MVT::i64:
    RetVals.push_back(MVT::i32);
    RetVals.push_back(MVT::i32);
    break;
  }
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
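  // Operand #2 is the number of bytes the caller pushes for arguments, and
  // operand #3 is the number of bytes the callee pops off the stack (zero
  // here: in the C convention the caller cleans up its own argument area).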
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(0, getPointerTy()));
  SDOperand TheCall = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                                  RetVals, Ops);
  Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, TheCall);

  SDOperand ResultVal;
  switch (RetTyVT) {
  case MVT::isVoid: break;
  default:
    ResultVal = TheCall.getValue(1);
    break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    ResultVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, TheCall.getValue(1));
    break;
  case MVT::f32:
    // FIXME: we would really like to remember that this FP_ROUND operation is
    // okay to eliminate if we allow excess FP precision.
    ResultVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, TheCall.getValue(1));
    break;
  case MVT::i64:
    ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, TheCall.getValue(1),
                            TheCall.getValue(2));
    break;
  }

  return std::make_pair(ResultVal, Chain);
}

SDOperand
X86TargetLowering::LowerVAStart(SDOperand Chain, SDOperand VAListP,
                                Value *VAListV, SelectionDAG &DAG) {
  // vastart just stores the address of the VarArgsFrameIndex slot.
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
  return DAG.getNode(ISD::STORE, MVT::Other, Chain, FR, VAListP,
                     DAG.getSrcValue(VAListV));
}


std::pair<SDOperand,SDOperand>
X86TargetLowering::LowerVAArg(SDOperand Chain, SDOperand VAListP,
                              Value *VAListV, const Type *ArgTy,
                              SelectionDAG &DAG) {
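  // Load the current va_list pointer, read the argument at that address, then
  // advance the pointer past the argument (by its promoted size) and store the
  // updated pointer back to the va_list.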
  MVT::ValueType ArgVT = getValueType(ArgTy);
  SDOperand Val = DAG.getLoad(MVT::i32, Chain,
                              VAListP, DAG.getSrcValue(VAListV));
  SDOperand Result = DAG.getLoad(ArgVT, Chain, Val,
                                 DAG.getSrcValue(NULL));
  unsigned Amt;
  if (ArgVT == MVT::i32)
    Amt = 4;
  else {
    assert((ArgVT == MVT::i64 || ArgVT == MVT::f64) &&
           "Other types should have been promoted for varargs!");
    Amt = 8;
  }
  Val = DAG.getNode(ISD::ADD, Val.getValueType(), Val,
                    DAG.getConstant(Amt, Val.getValueType()));
  Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain,
                      Val, VAListP, DAG.getSrcValue(VAListV));
  return std::make_pair(Result, Chain);
}

//===----------------------------------------------------------------------===//
//                    Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//
// The X86 'fast' calling convention passes up to two integer arguments in
// registers (an appropriate portion of EAX/EDX), passes arguments in C order,
// and requires that the callee pop its arguments off the stack (allowing proper
// tail calls), and has the same return value conventions as C calling convs.
//
// This calling convention always arranges for the callee pop value to be 8n+4
// bytes, which is needed for tail recursion elimination and stack alignment
// reasons.
//
// Note that this can be enhanced in the future to pass fp vals in registers
// (when we have a global fp allocator) and do other tricks.
//

/// AddLiveIn - This helper function adds the specified physical register to the
/// MachineFunction as a live in value.  It also creates a corresponding virtual
/// register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
  MF.addLiveIn(PReg, VReg);
  return VReg;
}


std::vector<SDOperand>
X86TargetLowering::LowerFastCCArguments(Function &F, SelectionDAG &DAG) {
  std::vector<SDOperand> ArgValues;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // Add DAG nodes to load the arguments...  On entry to a function the stack
  // frame looks like this:
  //
  // [ESP] -- return address
  // [ESP + 4] -- first nonreg argument (leftmost lexically)
  // [ESP + 8] -- second nonreg argument, if first argument is 4 bytes in size
  //    ...
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot

  // Keep track of the number of integer regs passed so far.  This can be either
  // 0 (neither EAX nor EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
  // used).
  unsigned NumIntRegs = 0;

  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    MVT::ValueType ObjectVT = getValueType(I->getType());
    unsigned ArgIncrement = 4;
    unsigned ObjSize = 0;
    SDOperand ArgValue;

    switch (ObjectVT) {
    default: assert(0 && "Unhandled argument type!");
    case MVT::i1:
    case MVT::i8:
      if (NumIntRegs < 2) {
        if (!I->use_empty()) {
          unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
                                    X86::R8RegisterClass);
          ArgValue = DAG.getCopyFromReg(VReg, MVT::i8, DAG.getRoot());
          DAG.setRoot(ArgValue.getValue(1));
        }
        ++NumIntRegs;
        break;
      }

      ObjSize = 1;
      break;
    case MVT::i16:
      if (NumIntRegs < 2) {
        if (!I->use_empty()) {
          unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
                                    X86::R16RegisterClass);
          ArgValue = DAG.getCopyFromReg(VReg, MVT::i16, DAG.getRoot());
          DAG.setRoot(ArgValue.getValue(1));
        }
        ++NumIntRegs;
        break;
      }
      ObjSize = 2;
      break;
    case MVT::i32:
      if (NumIntRegs < 2) {
        if (!I->use_empty()) {
          unsigned VReg = AddLiveIn(MF,NumIntRegs ? X86::EDX : X86::EAX,
                                    X86::R32RegisterClass);
          ArgValue = DAG.getCopyFromReg(VReg, MVT::i32, DAG.getRoot());
          DAG.setRoot(ArgValue.getValue(1));
        }
        ++NumIntRegs;
        break;
      }
      ObjSize = 4;
      break;
    case MVT::i64:
      if (NumIntRegs == 0) {
        if (!I->use_empty()) {
          unsigned BotReg = AddLiveIn(MF, X86::EAX, X86::R32RegisterClass);
          unsigned TopReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);

          SDOperand Low=DAG.getCopyFromReg(BotReg, MVT::i32, DAG.getRoot());
          SDOperand Hi =DAG.getCopyFromReg(TopReg, MVT::i32, Low.getValue(1));
          DAG.setRoot(Hi.getValue(1));

          ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
        }
        NumIntRegs = 2;
        break;
      } else if (NumIntRegs == 1) {
        if (!I->use_empty()) {
          unsigned BotReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
          SDOperand Low = DAG.getCopyFromReg(BotReg, MVT::i32, DAG.getRoot());
          DAG.setRoot(Low.getValue(1));

          // Load the high part from memory.
          // Create the frame index object for this incoming parameter...
          int FI = MFI->CreateFixedObject(4, ArgOffset);
          SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
          SDOperand Hi = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN,
                                     DAG.getSrcValue(NULL));
          ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
        }
        ArgOffset += 4;
        NumIntRegs = 2;
        break;
      }
      ObjSize = ArgIncrement = 8;
      break;
    case MVT::f32: ObjSize = 4;                break;
    case MVT::f64: ObjSize = ArgIncrement = 8; break;
    }

    // Don't codegen dead arguments.  FIXME: remove this check when we can nuke
    // dead loads.
    if (ObjSize && !I->use_empty()) {
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);

      // Create the SelectionDAG nodes corresponding to a load from this
      // parameter.
      SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);

      ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
                             DAG.getSrcValue(NULL));
    } else if (ArgValue.Val == 0) {
      if (MVT::isInteger(ObjectVT))
        ArgValue = DAG.getConstant(0, ObjectVT);
      else
        ArgValue = DAG.getConstantFP(0, ObjectVT);
    }
    ArgValues.push_back(ArgValue);

    if (ObjSize)
      ArgOffset += ArgIncrement;   // Move on to the next argument.
  }

  // Make sure the argument area takes 8n+4 bytes, so that both the start of the
  // arguments and the arguments after the retaddr has been pushed are aligned.
  if ((ArgOffset & 7) == 0)
    ArgOffset += 4;

  VarArgsFrameIndex = 0xAAAAAAA;   // fastcc functions can't have varargs.
  ReturnAddrIndex = 0;             // No return address slot generated yet.
  BytesToPopOnReturn = ArgOffset;  // Callee pops all stack arguments.
  BytesCallerReserves = 0;

  // Finally, inform the code generator which regs we return values in.
  switch (getValueType(F.getReturnType())) {
  default: assert(0 && "Unknown type!");
  case MVT::isVoid: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    MF.addLiveOut(X86::EAX);
    break;
  case MVT::i64:
    MF.addLiveOut(X86::EAX);
    MF.addLiveOut(X86::EDX);
    break;
  case MVT::f32:
  case MVT::f64:
    MF.addLiveOut(X86::ST0);
    break;
  }
  return ArgValues;
}

std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerFastCCCallTo(SDOperand Chain, const Type *RetTy,
                                     bool isTailCall, SDOperand Callee,
                                     ArgListTy &Args, SelectionDAG &DAG) {
  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;

  // Keep track of the number of integer regs passed so far.  This can be either
  // 0 (neither EAX nor EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
  // used).
  unsigned NumIntRegs = 0;

  for (unsigned i = 0, e = Args.size(); i != e; ++i)
    switch (getValueType(Args[i].second)) {
    default: assert(0 && "Unknown value type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (NumIntRegs < 2) {
        ++NumIntRegs;
        break;
      }
      // fall through
    case MVT::f32:
      NumBytes += 4;
      break;
    case MVT::i64:
      if (NumIntRegs == 0) {
        NumIntRegs = 2;
        break;
      } else if (NumIntRegs == 1) {
        NumIntRegs = 2;
        NumBytes += 4;
        break;
      }

      // fall through
    case MVT::f64:
      NumBytes += 8;
      break;
    }

  // Make sure the argument area takes 8n+4 bytes, so that both the start of the
  // arguments and the arguments after the retaddr has been pushed are aligned.
  if ((NumBytes & 7) == 0)
    NumBytes += 4;

  Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
                      DAG.getConstant(NumBytes, getPointerTy()));

  // Arguments go on the stack in reverse order, as specified by the ABI.
  unsigned ArgOffset = 0;
  SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32,
                                          DAG.getEntryNode());
  NumIntRegs = 0;
  std::vector<SDOperand> Stores;
  std::vector<SDOperand> RegValuesToPass;
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    switch (getValueType(Args[i].second)) {
    default: assert(0 && "Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (NumIntRegs < 2) {
        RegValuesToPass.push_back(Args[i].first);
        ++NumIntRegs;
        break;
      }
      // Fall through
    case MVT::f32: {
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
      Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                   Args[i].first, PtrOff,
                                   DAG.getSrcValue(NULL)));
      ArgOffset += 4;
      break;
    }
    case MVT::i64:
      if (NumIntRegs < 2) {    // Can pass part of it in regs?
        SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
                                   Args[i].first, DAG.getConstant(1, MVT::i32));
        SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
                                   Args[i].first, DAG.getConstant(0, MVT::i32));
        RegValuesToPass.push_back(Lo);
        ++NumIntRegs;
        if (NumIntRegs < 2) {   // Pass both parts in regs?
          RegValuesToPass.push_back(Hi);
          ++NumIntRegs;
        } else {
          // Pass the high part in memory.
          SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
          PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
          Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                       Hi, PtrOff, DAG.getSrcValue(NULL)));
          ArgOffset += 4;
        }
        break;
      }
      // Fall through
    case MVT::f64:
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
      Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                   Args[i].first, PtrOff,
                                   DAG.getSrcValue(NULL)));
      ArgOffset += 8;
      break;
    }
  }
  if (!Stores.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);

  // Make sure the argument area takes 8n+4 bytes, so that both the start of the
  // arguments and the arguments after the retaddr has been pushed are aligned.
  if ((ArgOffset & 7) == 0)
    ArgOffset += 4;

  std::vector<MVT::ValueType> RetVals;
  MVT::ValueType RetTyVT = getValueType(RetTy);

  RetVals.push_back(MVT::Other);

  // The result values produced have to be legal.  Promote the result.
  switch (RetTyVT) {
  case MVT::isVoid: break;
  default:
    RetVals.push_back(RetTyVT);
    break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    RetVals.push_back(MVT::i32);
    break;
  case MVT::f32:
    if (X86ScalarSSE)
      RetVals.push_back(MVT::f32);
    else
      RetVals.push_back(MVT::f64);
    break;
  case MVT::i64:
    RetVals.push_back(MVT::i32);
    RetVals.push_back(MVT::i32);
    break;
  }

  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
  // Callee pops all arg values on the stack.
  Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));

  // Pass register arguments as needed.
  Ops.insert(Ops.end(), RegValuesToPass.begin(), RegValuesToPass.end());

  SDOperand TheCall = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                                  RetVals, Ops);
  Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, TheCall);

  SDOperand ResultVal;
  switch (RetTyVT) {
  case MVT::isVoid: break;
  default:
    ResultVal = TheCall.getValue(1);
    break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    ResultVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, TheCall.getValue(1));
    break;
  case MVT::f32:
    // FIXME: we would really like to remember that this FP_ROUND operation is
    // okay to eliminate if we allow excess FP precision.
    ResultVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, TheCall.getValue(1));
    break;
  case MVT::i64:
    ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, TheCall.getValue(1),
                            TheCall.getValue(2));
    break;
  }

  return std::make_pair(ResultVal, Chain);
}

SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    MachineFunction &MF = DAG.getMachineFunction();
    ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
}



std::pair<SDOperand, SDOperand> X86TargetLowering::
LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
                        SelectionDAG &DAG) {
  SDOperand Result;
  if (Depth)        // Depths > 0 not supported yet!
    Result = DAG.getConstant(0, getPointerTy());
  else {
    SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
    if (!isFrameAddress)
      // Just load the return address
      Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(), RetAddrFI,
                           DAG.getSrcValue(NULL));
    else
      Result = DAG.getNode(ISD::SUB, MVT::i32, RetAddrFI,
                           DAG.getConstant(4, MVT::i32));
  }
  return std::make_pair(Result, Chain);
}

/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Should not custom lower this!");
  case ISD::SINT_TO_FP:
    assert(Op.getValueType() == MVT::f64 &&
           Op.getOperand(0).getValueType() == MVT::i64 &&
           "Unknown SINT_TO_FP to lower!");
    // We lower sint64->FP into a store to a temporary stack slot, followed by a
    // FILD64m node.
    MachineFunction &MF = DAG.getMachineFunction();
    int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
    SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
    SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, DAG.getEntryNode(),
                                  Op.getOperand(0), StackSlot,
                                  DAG.getSrcValue(NULL));
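    // The FILD64m node takes the token chain (the store above) and the stack
    // slot address as inputs, and produces the loaded FP value plus an output
    // chain (see the X86ISD::FILD64m description at the top of this file).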
    std::vector<MVT::ValueType> RTs;
    RTs.push_back(MVT::f64);
    RTs.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(Store);
    Ops.push_back(StackSlot);
    return DAG.getNode(X86ISD::FILD64m, RTs, Ops);
  }
}


//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDOperand's instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase,
    } BaseType;

    struct {            // This is really a union, discriminated by BaseType!
      SDOperand Reg;
      int FrameIndex;
    } Base;

    unsigned Scale;
    SDOperand IndexReg;
    unsigned Disp;
    GlobalValue *GV;

    X86ISelAddressMode()
      : BaseType(RegBase), Scale(1), IndexReg(), Disp(), GV(0) {
    }
  };
}


namespace {
  Statistic<>
  NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");

  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class ISel : public SelectionDAGISel {
    /// ContainsFPCode - Every instruction we select that uses or defines a FP
    /// register should set this to true.
    bool ContainsFPCode;

    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    X86TargetLowering X86Lowering;

    /// RegPressureMap - This keeps an approximate count of the number of
    /// registers required to evaluate each node in the graph.
    std::map<SDNode*, unsigned> RegPressureMap;

    /// ExprMap - As shared expressions are codegen'd, we keep track of which
    /// vreg the value is produced in, so we only emit one copy of each compiled
    /// tree.
    std::map<SDOperand, unsigned> ExprMap;

    /// TheDAG - The DAG being selected during Select* operations.
    SelectionDAG *TheDAG;
  public:
    ISel(TargetMachine &TM) : SelectionDAGISel(X86Lowering), X86Lowering(TM) {
    }

    virtual const char *getPassName() const {
      return "X86 Pattern Instruction Selection";
    }

    unsigned getRegPressure(SDOperand O) {
      return RegPressureMap[O.Val];
    }
    unsigned ComputeRegPressure(SDOperand O);

    /// InstructionSelectBasicBlock - This callback is invoked by
    /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
    virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);

    virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);

    bool isFoldableLoad(SDOperand Op, SDOperand OtherOp,
                        bool FloatPromoteOk = false);
    void EmitFoldedLoad(SDOperand Op, X86AddressMode &AM);
    bool TryToFoldLoadOpStore(SDNode *Node);
    bool EmitOrOpOp(SDOperand Op1, SDOperand Op2, unsigned DestReg);
    void EmitCMP(SDOperand LHS, SDOperand RHS, bool isOnlyUse);
    bool EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain, SDOperand Cond);
    void EmitSelectCC(SDOperand Cond, MVT::ValueType SVT,
                      unsigned RTrue, unsigned RFalse, unsigned RDest);
    unsigned SelectExpr(SDOperand N);

    X86AddressMode SelectAddrExprs(const X86ISelAddressMode &IAM);
    bool MatchAddress(SDOperand N, X86ISelAddressMode &AM);
    void SelectAddress(SDOperand N, X86AddressMode &AM);
    bool EmitPotentialTailCall(SDNode *Node);
    void EmitFastCCToFastCCTailCall(SDNode *TailCallNode);
    void Select(SDOperand N);
  };
}

/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
static void EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                   MachineFrameInfo *MFI) {
  // Switch the FPU to 64-bit precision mode for better compatibility and speed.
  int CWFrameIdx = MFI->CreateStackObject(2, 2);
  addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);

  // Set the high part to be 64-bit precision.
  addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
                    CWFrameIdx, 1).addImm(2);

  // Reload the modified control word now.
  addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
}

void ISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
  // If this function has live-in values, emit the copies from pregs to vregs at
  // the top of the function, before anything else.
  MachineBasicBlock *BB = MF.begin();
  if (MF.livein_begin() != MF.livein_end()) {
    SSARegMap *RegMap = MF.getSSARegMap();
    for (MachineFunction::livein_iterator LI = MF.livein_begin(),
         E = MF.livein_end(); LI != E; ++LI) {
      const TargetRegisterClass *RC = RegMap->getRegClass(LI->second);
      if (RC == X86::R8RegisterClass) {
        BuildMI(BB, X86::MOV8rr, 1, LI->second).addReg(LI->first);
      } else if (RC == X86::R16RegisterClass) {
        BuildMI(BB, X86::MOV16rr, 1, LI->second).addReg(LI->first);
      } else if (RC == X86::R32RegisterClass) {
        BuildMI(BB, X86::MOV32rr, 1, LI->second).addReg(LI->first);
      } else if (RC == X86::RFPRegisterClass) {
        BuildMI(BB, X86::FpMOV, 1, LI->second).addReg(LI->first);
      } else if (RC == X86::RXMMRegisterClass) {
        BuildMI(BB, X86::MOVAPDrr, 1, LI->second).addReg(LI->first);
      } else {
        assert(0 && "Unknown regclass!");
      }
    }
  }


  // If this is main, emit special code for main.
  if (Fn.hasExternalLinkage() && Fn.getName() == "main")
    EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}


/// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
void ISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
  // While we're doing this, keep track of whether we see any FP code for
  // FP_REG_KILL insertion.
  ContainsFPCode = false;
  MachineFunction *MF = BB->getParent();

  // Scan the PHI nodes that already are inserted into this basic block.  If any
  // of them is a PHI of a floating point value, we need to insert an
  // FP_REG_KILL.
  SSARegMap *RegMap = MF->getSSARegMap();
  if (BB != MF->begin())
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      assert(I->getOpcode() == X86::PHI &&
             "Isn't just PHI nodes?");
      if (RegMap->getRegClass(I->getOperand(0).getReg()) ==
          X86::RFPRegisterClass) {
        ContainsFPCode = true;
        break;
      }
    }

  // Compute the RegPressureMap, which is an approximation for the number of
  // registers required to compute each node.
  ComputeRegPressure(DAG.getRoot());

  TheDAG = &DAG;

  // Codegen the basic block.
  Select(DAG.getRoot());

  TheDAG = 0;

  // Finally, look at all of the successors of this block.  If any contain a PHI
  // node of FP type, we need to insert an FP_REG_KILL in this block.
  for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
       E = BB->succ_end(); SI != E && !ContainsFPCode; ++SI)
    for (MachineBasicBlock::iterator I = (*SI)->begin(), E = (*SI)->end();
         I != E && I->getOpcode() == X86::PHI; ++I) {
      if (RegMap->getRegClass(I->getOperand(0).getReg()) ==
          X86::RFPRegisterClass) {
        ContainsFPCode = true;
        break;
      }
    }

  // Final check, check LLVM BB's that are successors to the LLVM BB
  // corresponding to BB for FP PHI nodes.
  const BasicBlock *LLVMBB = BB->getBasicBlock();
  const PHINode *PN;
  if (!ContainsFPCode)
    for (succ_const_iterator SI = succ_begin(LLVMBB), E = succ_end(LLVMBB);
         SI != E && !ContainsFPCode; ++SI)
      for (BasicBlock::const_iterator II = SI->begin();
           (PN = dyn_cast<PHINode>(II)); ++II)
        if (PN->getType()->isFloatingPoint()) {
          ContainsFPCode = true;
          break;
        }


  // Insert FP_REG_KILL instructions into basic blocks that need them.  This
  // only occurs due to the floating point stackifier not being aggressive
  // enough to handle arbitrary global stackification.
  //
  // Currently we insert an FP_REG_KILL instruction into each block that uses or
  // defines a floating point virtual register.
  //
  // When the global register allocators (like linear scan) finally update live
  // variable analysis, we can keep floating point values in registers across
  // basic blocks.  This will be a huge win, but we are waiting on the global
  // allocators before we can do this.
  //
  if (ContainsFPCode) {
    BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
    ++NumFPKill;
  }

  // Clear state used for selection.
  ExprMap.clear();
  RegPressureMap.clear();
}


// ComputeRegPressure - Compute the RegPressureMap, which is an approximation
// for the number of registers required to compute each node.  This is basically
// computing a generalized form of the Sethi-Ullman number for each node.
unsigned ISel::ComputeRegPressure(SDOperand O) {
  SDNode *N = O.Val;
  unsigned &Result = RegPressureMap[N];
  if (Result) return Result;

  // FIXME: Should operations like CALL (which clobber lots o regs) have a
  // higher fixed cost??

  if (N->getNumOperands() == 0) {
    Result = 1;
  } else {
    unsigned MaxRegUse = 0;
    unsigned NumExtraMaxRegUsers = 0;
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      unsigned Regs;
      if (N->getOperand(i).getOpcode() == ISD::Constant)
        Regs = 0;
      else
        Regs = ComputeRegPressure(N->getOperand(i));
      if (Regs > MaxRegUse) {
        MaxRegUse = Regs;
        NumExtraMaxRegUsers = 0;
      } else if (Regs == MaxRegUse &&
                 N->getOperand(i).getValueType() != MVT::Other) {
        ++NumExtraMaxRegUsers;
      }
    }

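    // Generalized Sethi-Ullman estimate: a node needs roughly as many registers
    // as its most expensive operand, plus one more for each additional operand
    // that ties that maximum (those values have to be live at the same time).
    // TokenFactor nodes only merge chains and define no register result, so
    // they are charged less.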
    if (O.getOpcode() != ISD::TokenFactor)
      Result = MaxRegUse+NumExtraMaxRegUsers;
    else
      Result = MaxRegUse == 1 ? 0 : MaxRegUse-1;
  }

  //std::cerr << " WEIGHT: " << Result << " ";  N->dump(); std::cerr << "\n";
  return Result;
}

/// NodeTransitivelyUsesValue - Return true if N is Op or transitively uses Op
/// through its operands.  The DAG cannot have cycles in it, by definition, so
/// the visited set is not needed to prevent infinite loops.  The DAG CAN,
/// however, have unbounded reuse, so the set prevents exponential cases.
///
1218static bool NodeTransitivelyUsesValue(SDOperand N, SDOperand Op,
1219 std::set<SDNode*> &Visited) {
1220 if (N == Op) return true; // Found it.
1221 SDNode *Node = N.Val;
Chris Lattnerfb0f53f2005-01-21 21:43:02 +00001222 if (Node->getNumOperands() == 0 || // Leaf?
1223 Node->getNodeDepth() <= Op.getNodeDepth()) return false; // Can't find it?
Chris Lattnerbf52d492005-01-20 16:50:16 +00001224 if (!Visited.insert(Node).second) return false; // Already visited?
1225
1226 // Recurse for the first N-1 operands.
1227 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
1228 if (NodeTransitivelyUsesValue(Node->getOperand(i), Op, Visited))
1229 return true;
1230
1231 // Tail recurse for the last operand.
1232 return NodeTransitivelyUsesValue(Node->getOperand(0), Op, Visited);
1233}
1234
Chris Lattner98a8ba02005-01-18 01:06:26 +00001235X86AddressMode ISel::SelectAddrExprs(const X86ISelAddressMode &IAM) {
1236 X86AddressMode Result;
1237
1238 // If we need to emit two register operands, emit the one with the highest
1239 // register pressure first.
1240 if (IAM.BaseType == X86ISelAddressMode::RegBase &&
1241 IAM.Base.Reg.Val && IAM.IndexReg.Val) {
Chris Lattnerbf52d492005-01-20 16:50:16 +00001242 bool EmitBaseThenIndex;
Chris Lattner98a8ba02005-01-18 01:06:26 +00001243 if (getRegPressure(IAM.Base.Reg) > getRegPressure(IAM.IndexReg)) {
Chris Lattnerbf52d492005-01-20 16:50:16 +00001244 std::set<SDNode*> Visited;
1245 EmitBaseThenIndex = true;
1246 // If Base ends up pointing to Index, we must emit index first. This is
1247 // because of the way we fold loads, we may end up doing bad things with
1248 // the folded add.
1249 if (NodeTransitivelyUsesValue(IAM.Base.Reg, IAM.IndexReg, Visited))
1250 EmitBaseThenIndex = false;
1251 } else {
1252 std::set<SDNode*> Visited;
1253 EmitBaseThenIndex = false;
1254 // If Base ends up pointing to Index, we must emit index first. This is
1255 // because of the way we fold loads, we may end up doing bad things with
1256 // the folded add.
1257 if (NodeTransitivelyUsesValue(IAM.IndexReg, IAM.Base.Reg, Visited))
1258 EmitBaseThenIndex = true;
1259 }
1260
1261 if (EmitBaseThenIndex) {
Chris Lattner98a8ba02005-01-18 01:06:26 +00001262 Result.Base.Reg = SelectExpr(IAM.Base.Reg);
1263 Result.IndexReg = SelectExpr(IAM.IndexReg);
1264 } else {
1265 Result.IndexReg = SelectExpr(IAM.IndexReg);
1266 Result.Base.Reg = SelectExpr(IAM.Base.Reg);
1267 }
Chris Lattnerbf52d492005-01-20 16:50:16 +00001268
Chris Lattner98a8ba02005-01-18 01:06:26 +00001269 } else if (IAM.BaseType == X86ISelAddressMode::RegBase && IAM.Base.Reg.Val) {
1270 Result.Base.Reg = SelectExpr(IAM.Base.Reg);
1271 } else if (IAM.IndexReg.Val) {
1272 Result.IndexReg = SelectExpr(IAM.IndexReg);
1273 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00001274
Chris Lattner98a8ba02005-01-18 01:06:26 +00001275 switch (IAM.BaseType) {
1276 case X86ISelAddressMode::RegBase:
1277 Result.BaseType = X86AddressMode::RegBase;
1278 break;
1279 case X86ISelAddressMode::FrameIndexBase:
1280 Result.BaseType = X86AddressMode::FrameIndexBase;
1281 Result.Base.FrameIndex = IAM.Base.FrameIndex;
1282 break;
1283 default:
1284 assert(0 && "Unknown base type!");
1285 break;
1286 }
1287 Result.Scale = IAM.Scale;
1288 Result.Disp = IAM.Disp;
1289 Result.GV = IAM.GV;
1290 return Result;
1291}
1292
1293/// SelectAddress - Pattern match the maximal addressing mode for this node and
1294/// emit all of the leaf registers.
1295void ISel::SelectAddress(SDOperand N, X86AddressMode &AM) {
1296 X86ISelAddressMode IAM;
1297 MatchAddress(N, IAM);
1298 AM = SelectAddrExprs(IAM);
1299}
1300
1301/// MatchAddress - Add the specified node to the specified addressing mode,
1302/// returning true if it cannot be done. This just pattern matches for the
1303/// addressing mode, it does not cause any code to be emitted. For that, use
1304/// SelectAddress.
1305bool ISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM) {
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001306 switch (N.getOpcode()) {
1307 default: break;
1308 case ISD::FrameIndex:
Chris Lattner98a8ba02005-01-18 01:06:26 +00001309 if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base.Reg.Val == 0) {
1310 AM.BaseType = X86ISelAddressMode::FrameIndexBase;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001311 AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
1312 return false;
1313 }
1314 break;
1315 case ISD::GlobalAddress:
1316 if (AM.GV == 0) {
1317 AM.GV = cast<GlobalAddressSDNode>(N)->getGlobal();
1318 return false;
1319 }
1320 break;
1321 case ISD::Constant:
1322 AM.Disp += cast<ConstantSDNode>(N)->getValue();
1323 return false;
1324 case ISD::SHL:
Chris Lattner636e79a2005-01-13 05:53:16 +00001325 // We might have folded the load into this shift, so don't regen the value
1326 // if so.
1327 if (ExprMap.count(N)) break;
1328
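    // Fold a small left shift into the scale: e.g. (X << 2) becomes IndexReg=X
    // with Scale=4, and ((X+5) << 2) additionally folds 20 into the displacement.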
Chris Lattner98a8ba02005-01-18 01:06:26 +00001329 if (AM.IndexReg.Val == 0 && AM.Scale == 1)
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001330 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
1331 unsigned Val = CN->getValue();
1332 if (Val == 1 || Val == 2 || Val == 3) {
1333 AM.Scale = 1 << Val;
Chris Lattner51a26342005-01-11 06:36:20 +00001334 SDOperand ShVal = N.Val->getOperand(0);
1335
1336 // Okay, we know that we have a scale by now. However, if the scaled
1337 // value is an add of something and a constant, we can fold the
1338 // constant into the disp field here.
Chris Lattner811482a2005-01-18 04:18:32 +00001339 if (ShVal.Val->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
Chris Lattner51a26342005-01-11 06:36:20 +00001340 isa<ConstantSDNode>(ShVal.Val->getOperand(1))) {
Chris Lattner98a8ba02005-01-18 01:06:26 +00001341 AM.IndexReg = ShVal.Val->getOperand(0);
Chris Lattner51a26342005-01-11 06:36:20 +00001342 ConstantSDNode *AddVal =
1343 cast<ConstantSDNode>(ShVal.Val->getOperand(1));
1344 AM.Disp += AddVal->getValue() << Val;
Chris Lattner636e79a2005-01-13 05:53:16 +00001345 } else {
Chris Lattner98a8ba02005-01-18 01:06:26 +00001346 AM.IndexReg = ShVal;
Chris Lattner51a26342005-01-11 06:36:20 +00001347 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001348 return false;
1349 }
1350 }
1351 break;
Chris Lattner947d5442005-01-11 19:37:02 +00001352 case ISD::MUL:
Chris Lattner636e79a2005-01-13 05:53:16 +00001353 // We might have folded the load into this mul, so don't regen the value if
1354 // so.
1355 if (ExprMap.count(N)) break;
1356
Chris Lattner947d5442005-01-11 19:37:02 +00001357 // X*[3,5,9] -> X+X*[2,4,8]
Chris Lattner98a8ba02005-01-18 01:06:26 +00001358 if (AM.IndexReg.Val == 0 && AM.BaseType == X86ISelAddressMode::RegBase &&
1359 AM.Base.Reg.Val == 0)
Chris Lattner947d5442005-01-11 19:37:02 +00001360 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
1361 if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
1362 AM.Scale = unsigned(CN->getValue())-1;
1363
1364 SDOperand MulVal = N.Val->getOperand(0);
Chris Lattner98a8ba02005-01-18 01:06:26 +00001365 SDOperand Reg;
Chris Lattner947d5442005-01-11 19:37:02 +00001366
1367 // Okay, we know that we have a scale by now. However, if the scaled
1368 // value is an add of something and a constant, we can fold the
1369 // constant into the disp field here.
Chris Lattner811482a2005-01-18 04:18:32 +00001370 if (MulVal.Val->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
Chris Lattner947d5442005-01-11 19:37:02 +00001371 isa<ConstantSDNode>(MulVal.Val->getOperand(1))) {
Chris Lattner98a8ba02005-01-18 01:06:26 +00001372 Reg = MulVal.Val->getOperand(0);
Chris Lattner947d5442005-01-11 19:37:02 +00001373 ConstantSDNode *AddVal =
1374 cast<ConstantSDNode>(MulVal.Val->getOperand(1));
1375 AM.Disp += AddVal->getValue() * CN->getValue();
Misha Brukman0e0a7a452005-04-21 23:38:14 +00001376 } else {
Chris Lattner98a8ba02005-01-18 01:06:26 +00001377 Reg = N.Val->getOperand(0);
Chris Lattner947d5442005-01-11 19:37:02 +00001378 }
1379
1380 AM.IndexReg = AM.Base.Reg = Reg;
1381 return false;
1382 }
1383 break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001384
1385 case ISD::ADD: {
Chris Lattner636e79a2005-01-13 05:53:16 +00001386 // We might have folded the load into this add, so don't regen the value if
1387 // so.
1388 if (ExprMap.count(N)) break;
1389
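    // Try to fold both operands into the addressing mode, first in (0,1) order
    // and then in (1,0) order, restoring the saved mode between the attempts.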
Chris Lattner98a8ba02005-01-18 01:06:26 +00001390 X86ISelAddressMode Backup = AM;
1391 if (!MatchAddress(N.Val->getOperand(0), AM) &&
1392 !MatchAddress(N.Val->getOperand(1), AM))
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001393 return false;
1394 AM = Backup;
Chris Lattner98a8ba02005-01-18 01:06:26 +00001395 if (!MatchAddress(N.Val->getOperand(1), AM) &&
1396 !MatchAddress(N.Val->getOperand(0), AM))
Chris Lattner9bbd9922005-01-12 18:08:53 +00001397 return false;
1398 AM = Backup;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001399 break;
1400 }
1401 }
1402
Chris Lattnera95589b2005-01-11 04:40:19 +00001403 // Is the base register already occupied?
Chris Lattner98a8ba02005-01-18 01:06:26 +00001404 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.Val) {
Chris Lattnera95589b2005-01-11 04:40:19 +00001405 // If so, check to see if the scale index register is set.
Chris Lattner98a8ba02005-01-18 01:06:26 +00001406 if (AM.IndexReg.Val == 0) {
1407 AM.IndexReg = N;
Chris Lattnera95589b2005-01-11 04:40:19 +00001408 AM.Scale = 1;
1409 return false;
1410 }
1411
1412 // Otherwise, we cannot select it.
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001413 return true;
Chris Lattnera95589b2005-01-11 04:40:19 +00001414 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001415
1416 // Default, generate it as a register.
Chris Lattner98a8ba02005-01-18 01:06:26 +00001417 AM.BaseType = X86ISelAddressMode::RegBase;
1418 AM.Base.Reg = N;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001419 return false;
1420}
1421
1422/// Emit2SetCCsAndLogical - Emit the following sequence of instructions,
1423/// assuming that the temporary registers are in the 8-bit register class.
1424///
1425/// Tmp1 = setcc1
1426/// Tmp2 = setcc2
1427/// DestReg = logicalop Tmp1, Tmp2
1428///
1429static void Emit2SetCCsAndLogical(MachineBasicBlock *BB, unsigned SetCC1,
1430 unsigned SetCC2, unsigned LogicalOp,
1431 unsigned DestReg) {
1432 SSARegMap *RegMap = BB->getParent()->getSSARegMap();
1433 unsigned Tmp1 = RegMap->createVirtualRegister(X86::R8RegisterClass);
1434 unsigned Tmp2 = RegMap->createVirtualRegister(X86::R8RegisterClass);
1435 BuildMI(BB, SetCC1, 0, Tmp1);
1436 BuildMI(BB, SetCC2, 0, Tmp2);
1437 BuildMI(BB, LogicalOp, 2, DestReg).addReg(Tmp1).addReg(Tmp2);
1438}
1439
1440/// EmitSetCC - Emit the code to set the specified 8-bit register to 1 if the
1441/// condition codes match the specified SetCCOpcode. Note that some conditions
1442/// require multiple instructions to generate the correct value.
1443static void EmitSetCC(MachineBasicBlock *BB, unsigned DestReg,
1444 ISD::CondCode SetCCOpcode, bool isFP) {
1445 unsigned Opc;
1446 if (!isFP) {
1447 switch (SetCCOpcode) {
1448 default: assert(0 && "Illegal integer SetCC!");
1449 case ISD::SETEQ: Opc = X86::SETEr; break;
1450 case ISD::SETGT: Opc = X86::SETGr; break;
1451 case ISD::SETGE: Opc = X86::SETGEr; break;
1452 case ISD::SETLT: Opc = X86::SETLr; break;
1453 case ISD::SETLE: Opc = X86::SETLEr; break;
1454 case ISD::SETNE: Opc = X86::SETNEr; break;
1455 case ISD::SETULT: Opc = X86::SETBr; break;
1456 case ISD::SETUGT: Opc = X86::SETAr; break;
1457 case ISD::SETULE: Opc = X86::SETBEr; break;
1458 case ISD::SETUGE: Opc = X86::SETAEr; break;
1459 }
1460 } else {
1461 // On a floating point condition, the flags are set as follows:
1462 // ZF PF CF op
1463 // 0 | 0 | 0 | X > Y
1464 // 0 | 0 | 1 | X < Y
1465 // 1 | 0 | 0 | X == Y
1466 // 1 | 1 | 1 | unordered
1467 //
1468 switch (SetCCOpcode) {
1469 default: assert(0 && "Invalid FP setcc!");
1470 case ISD::SETUEQ:
1471 case ISD::SETEQ:
1472 Opc = X86::SETEr; // True if ZF = 1
1473 break;
1474 case ISD::SETOGT:
1475 case ISD::SETGT:
1476 Opc = X86::SETAr; // True if CF = 0 and ZF = 0
1477 break;
1478 case ISD::SETOGE:
1479 case ISD::SETGE:
1480 Opc = X86::SETAEr; // True if CF = 0
1481 break;
1482 case ISD::SETULT:
1483 case ISD::SETLT:
1484 Opc = X86::SETBr; // True if CF = 1
1485 break;
1486 case ISD::SETULE:
1487 case ISD::SETLE:
1488 Opc = X86::SETBEr; // True if CF = 1 or ZF = 1
1489 break;
1490 case ISD::SETONE:
1491 case ISD::SETNE:
1492 Opc = X86::SETNEr; // True if ZF = 0
1493 break;
1494 case ISD::SETUO:
1495 Opc = X86::SETPr; // True if PF = 1
1496 break;
1497 case ISD::SETO:
1498 Opc = X86::SETNPr; // True if PF = 0
1499 break;
1500 case ISD::SETOEQ: // !PF & ZF
1501 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETEr, X86::AND8rr, DestReg);
1502 return;
1503 case ISD::SETOLT: // !PF & CF
1504 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBr, X86::AND8rr, DestReg);
1505 return;
1506 case ISD::SETOLE: // !PF & (CF || ZF)
1507 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBEr, X86::AND8rr, DestReg);
1508 return;
1509 case ISD::SETUGT: // PF | (!ZF & !CF)
1510 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAr, X86::OR8rr, DestReg);
1511 return;
1512 case ISD::SETUGE: // PF | !CF
1513 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAEr, X86::OR8rr, DestReg);
1514 return;
1515 case ISD::SETUNE: // PF | !ZF
1516 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETNEr, X86::OR8rr, DestReg);
1517 return;
1518 }
1519 }
1520 BuildMI(BB, Opc, 0, DestReg);
1521}
1522
1523
1524/// EmitBranchCC - Emit code into BB that arranges for control to transfer to
1525/// the Dest block if the Cond condition is true. If we cannot fold this
1526/// condition into the branch, return true.
1527///
Chris Lattner6c07aee2005-01-11 04:06:27 +00001528bool ISel::EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain,
1529 SDOperand Cond) {
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001530 // FIXME: Evaluate whether it would be good to emit code like (X < Y) | (A >
1531 // B) using two conditional branches instead of one condbr, two setcc's, and
1532 // an or.
1533 if ((Cond.getOpcode() == ISD::OR ||
1534 Cond.getOpcode() == ISD::AND) && Cond.Val->hasOneUse()) {
1535 // AND and OR set the flags for us, so there is no need to emit a TEST of the
1536 // result. It is only safe to do this if there is only a single use of the
1537 // AND/OR though, otherwise we don't know it will be emitted here.
Chris Lattner6c07aee2005-01-11 04:06:27 +00001538 Select(Chain);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001539 SelectExpr(Cond);
1540 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
1541 return false;
1542 }
1543
1544 // Codegen br not C -> JE.
1545 if (Cond.getOpcode() == ISD::XOR)
1546 if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(Cond.Val->getOperand(1)))
1547 if (NC->isAllOnesValue()) {
Chris Lattner6c07aee2005-01-11 04:06:27 +00001548 unsigned CondR;
1549 if (getRegPressure(Chain) > getRegPressure(Cond)) {
1550 Select(Chain);
1551 CondR = SelectExpr(Cond.Val->getOperand(0));
1552 } else {
1553 CondR = SelectExpr(Cond.Val->getOperand(0));
1554 Select(Chain);
1555 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001556 BuildMI(BB, X86::TEST8rr, 2).addReg(CondR).addReg(CondR);
1557 BuildMI(BB, X86::JE, 1).addMBB(Dest);
1558 return false;
1559 }
1560
1561 SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Cond);
1562 if (SetCC == 0)
1563 return true; // Can only handle simple setcc's so far.
1564
1565 unsigned Opc;
1566
1567 // Handle integer conditions first.
1568 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
1569 switch (SetCC->getCondition()) {
1570 default: assert(0 && "Illegal integer SetCC!");
1571 case ISD::SETEQ: Opc = X86::JE; break;
1572 case ISD::SETGT: Opc = X86::JG; break;
1573 case ISD::SETGE: Opc = X86::JGE; break;
1574 case ISD::SETLT: Opc = X86::JL; break;
1575 case ISD::SETLE: Opc = X86::JLE; break;
1576 case ISD::SETNE: Opc = X86::JNE; break;
1577 case ISD::SETULT: Opc = X86::JB; break;
1578 case ISD::SETUGT: Opc = X86::JA; break;
1579 case ISD::SETULE: Opc = X86::JBE; break;
1580 case ISD::SETUGE: Opc = X86::JAE; break;
1581 }
Chris Lattner6c07aee2005-01-11 04:06:27 +00001582 Select(Chain);
Chris Lattnercb1aa8d2005-01-17 01:34:14 +00001583 EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1), SetCC->hasOneUse());
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001584 BuildMI(BB, Opc, 1).addMBB(Dest);
1585 return false;
1586 }
1587
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001588 unsigned Opc2 = 0; // Second branch if needed.
1589
1590 // On a floating point condition, the flags are set as follows:
1591 // ZF PF CF op
1592 // 0 | 0 | 0 | X > Y
1593 // 0 | 0 | 1 | X < Y
1594 // 1 | 0 | 0 | X == Y
1595 // 1 | 1 | 1 | unordered
1596 //
1597 switch (SetCC->getCondition()) {
1598 default: assert(0 && "Invalid FP setcc!");
1599 case ISD::SETUEQ:
1600 case ISD::SETEQ: Opc = X86::JE; break; // True if ZF = 1
1601 case ISD::SETOGT:
1602 case ISD::SETGT: Opc = X86::JA; break; // True if CF = 0 and ZF = 0
1603 case ISD::SETOGE:
1604 case ISD::SETGE: Opc = X86::JAE; break; // True if CF = 0
1605 case ISD::SETULT:
1606 case ISD::SETLT: Opc = X86::JB; break; // True if CF = 1
1607 case ISD::SETULE:
1608 case ISD::SETLE: Opc = X86::JBE; break; // True if CF = 1 or ZF = 1
1609 case ISD::SETONE:
1610 case ISD::SETNE: Opc = X86::JNE; break; // True if ZF = 0
1611 case ISD::SETUO: Opc = X86::JP; break; // True if PF = 1
1612 case ISD::SETO: Opc = X86::JNP; break; // True if PF = 0
1613 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
1614 Opc = X86::JA; // ZF = 0 & CF = 0
1615 Opc2 = X86::JP; // PF = 1
1616 break;
1617 case ISD::SETUGE: // PF = 1 | CF = 0
1618 Opc = X86::JAE; // CF = 0
1619 Opc2 = X86::JP; // PF = 1
1620 break;
1621 case ISD::SETUNE: // PF = 1 | ZF = 0
1622 Opc = X86::JNE; // ZF = 0
1623 Opc2 = X86::JP; // PF = 1
1624 break;
1625 case ISD::SETOEQ: // PF = 0 & ZF = 1
1626 //X86::JNP, X86::JE
1627 //X86::AND8rr
1628 return true; // FIXME: Emit more efficient code for this branch.
1629 case ISD::SETOLT: // PF = 0 & CF = 1
1630 //X86::JNP, X86::JB
1631 //X86::AND8rr
1632 return true; // FIXME: Emit more efficient code for this branch.
1633 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
1634 //X86::JNP, X86::JBE
1635 //X86::AND8rr
1636 return true; // FIXME: Emit more efficient code for this branch.
1637 }
1638
Chris Lattner6c07aee2005-01-11 04:06:27 +00001639 Select(Chain);
Chris Lattnercb1aa8d2005-01-17 01:34:14 +00001640 EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1), SetCC->hasOneUse());
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001641 BuildMI(BB, Opc, 1).addMBB(Dest);
1642 if (Opc2)
1643 BuildMI(BB, Opc2, 1).addMBB(Dest);
1644 return false;
1645}
1646
Chris Lattner24aad1b2005-01-10 22:10:13 +00001647/// EmitSelectCC - Emit code into BB that performs a select operation between
1648/// the two registers RTrue and RFalse, generating a result into RDest.
1650///
1651void ISel::EmitSelectCC(SDOperand Cond, MVT::ValueType SVT,
1652 unsigned RTrue, unsigned RFalse, unsigned RDest) {
1653 enum Condition {
1654 EQ, NE, LT, LE, GT, GE, B, BE, A, AE, P, NP,
1655 NOT_SET
1656 } CondCode = NOT_SET;
1657
1658 static const unsigned CMOVTAB16[] = {
1659 X86::CMOVE16rr, X86::CMOVNE16rr, X86::CMOVL16rr, X86::CMOVLE16rr,
1660 X86::CMOVG16rr, X86::CMOVGE16rr, X86::CMOVB16rr, X86::CMOVBE16rr,
Misha Brukman0e0a7a452005-04-21 23:38:14 +00001661 X86::CMOVA16rr, X86::CMOVAE16rr, X86::CMOVP16rr, X86::CMOVNP16rr,
Chris Lattner24aad1b2005-01-10 22:10:13 +00001662 };
1663 static const unsigned CMOVTAB32[] = {
1664 X86::CMOVE32rr, X86::CMOVNE32rr, X86::CMOVL32rr, X86::CMOVLE32rr,
1665 X86::CMOVG32rr, X86::CMOVGE32rr, X86::CMOVB32rr, X86::CMOVBE32rr,
Misha Brukman0e0a7a452005-04-21 23:38:14 +00001666 X86::CMOVA32rr, X86::CMOVAE32rr, X86::CMOVP32rr, X86::CMOVNP32rr,
Chris Lattner24aad1b2005-01-10 22:10:13 +00001667 };
1668 static const unsigned CMOVTABFP[] = {
1669 X86::FCMOVE , X86::FCMOVNE, /*missing*/0, /*missing*/0,
1670 /*missing*/0, /*missing*/0, X86::FCMOVB , X86::FCMOVBE,
1671 X86::FCMOVA , X86::FCMOVAE, X86::FCMOVP , X86::FCMOVNP
1672 };
Nate Begemanf63be7d2005-07-06 18:59:04 +00001673 static const unsigned SSE_CMOVTAB[] = {
1674 0 /* CMPEQSS */, 4 /* CMPNEQSS */, 1 /* CMPLTSS */, 2 /* CMPLESS */,
1675 2 /* CMPLESS */, 1 /* CMPLTSS */, /*missing*/0, /*missing*/0,
1676 /*missing*/0, /*missing*/0, /*missing*/0, /*missing*/0
1677 };
Chris Lattner24aad1b2005-01-10 22:10:13 +00001678
1679 if (SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Cond)) {
1680 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
1681 switch (SetCC->getCondition()) {
1682 default: assert(0 && "Unknown integer comparison!");
1683 case ISD::SETEQ: CondCode = EQ; break;
1684 case ISD::SETGT: CondCode = GT; break;
1685 case ISD::SETGE: CondCode = GE; break;
1686 case ISD::SETLT: CondCode = LT; break;
1687 case ISD::SETLE: CondCode = LE; break;
1688 case ISD::SETNE: CondCode = NE; break;
1689 case ISD::SETULT: CondCode = B; break;
1690 case ISD::SETUGT: CondCode = A; break;
1691 case ISD::SETULE: CondCode = BE; break;
1692 case ISD::SETUGE: CondCode = AE; break;
1693 }
Nate Begemanf63be7d2005-07-06 18:59:04 +00001694 } else if (X86ScalarSSE) {
1695 switch (SetCC->getCondition()) {
1696 default: assert(0 && "Unknown scalar fp comparison!");
1697 case ISD::SETEQ: CondCode = EQ; break;
1698 case ISD::SETNE: CondCode = NE; break;
1699 case ISD::SETULT:
1700 case ISD::SETLT: CondCode = LT; break;
1701 case ISD::SETULE:
1702 case ISD::SETLE: CondCode = LE; break;
1703 case ISD::SETUGT:
1704 case ISD::SETGT: CondCode = GT; break;
1705 case ISD::SETUGE:
1706 case ISD::SETGE: CondCode = GE; break;
1707 }
Chris Lattner24aad1b2005-01-10 22:10:13 +00001708 } else {
1709 // On a floating point condition, the flags are set as follows:
1710 // ZF PF CF op
1711 // 0 | 0 | 0 | X > Y
1712 // 0 | 0 | 1 | X < Y
1713 // 1 | 0 | 0 | X == Y
1714 // 1 | 1 | 1 | unordered
1715 //
1716 switch (SetCC->getCondition()) {
1717 default: assert(0 && "Unknown FP comparison!");
1718 case ISD::SETUEQ:
1719 case ISD::SETEQ: CondCode = EQ; break; // True if ZF = 1
1720 case ISD::SETOGT:
1721 case ISD::SETGT: CondCode = A; break; // True if CF = 0 and ZF = 0
1722 case ISD::SETOGE:
1723 case ISD::SETGE: CondCode = AE; break; // True if CF = 0
1724 case ISD::SETULT:
1725 case ISD::SETLT: CondCode = B; break; // True if CF = 1
1726 case ISD::SETULE:
1727 case ISD::SETLE: CondCode = BE; break; // True if CF = 1 or ZF = 1
1728 case ISD::SETONE:
1729 case ISD::SETNE: CondCode = NE; break; // True if ZF = 0
1730 case ISD::SETUO: CondCode = P; break; // True if PF = 1
1731 case ISD::SETO: CondCode = NP; break; // True if PF = 0
1732 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
1733 case ISD::SETUGE: // PF = 1 | CF = 0
1734 case ISD::SETUNE: // PF = 1 | ZF = 0
1735 case ISD::SETOEQ: // PF = 0 & ZF = 1
1736 case ISD::SETOLT: // PF = 0 & CF = 1
1737 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
1738 // We cannot emit this comparison as a single cmov.
1739 break;
1740 }
1741 }
1742 }
1743
Nate Begemanf63be7d2005-07-06 18:59:04 +00001744 // There's no SSE equivalent of FCMOVE. In some cases we can fake it up; in
1745 // others we will have to do the PowerPC thing and generate an MBB for the
1746 // true and false values and select between them with a PHI.
1747 if (X86ScalarSSE) {
1748 if (CondCode != NOT_SET) {
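      // Emulate the conditional move with SSE: CMPSS/CMPSD produces an
      // all-ones/all-zeros mask; AND the mask with the true value, mask out
      // the false value with the inverse mask, and OR the two halves together.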
1749 unsigned CMPSOpc = (SVT == MVT::f64) ? X86::CMPSDrr : X86::CMPSSrr;
1750 unsigned CMPSImm = SSE_CMOVTAB[CondCode];
1751 // FIXME check for min
1752 // FIXME check for max
1753 // FIXME check for reverse
1754 unsigned LHS = SelectExpr(Cond.getOperand(0));
1755 unsigned RHS = SelectExpr(Cond.getOperand(1));
1756 // emit compare mask
1757 unsigned MaskReg = MakeReg(SVT);
1758 BuildMI(BB, CMPSOpc, 3, MaskReg).addReg(LHS).addReg(RHS).addImm(CMPSImm);
1759 // emit and with mask
1760 unsigned TrueMask = MakeReg(SVT);
1761 unsigned AndOpc = (SVT == MVT::f32) ? X86::ANDPSrr : X86::ANDPDrr;
1762 BuildMI(BB, AndOpc, 2, TrueMask).addReg(RTrue).addReg(MaskReg);
1763 // emit and with inverse mask
1764 unsigned FalseMask = MakeReg(SVT);
1765 unsigned AndnOpc = (SVT == MVT::f32) ? X86::ANDNPSrr : X86::ANDNPDrr;
1766 BuildMI(BB, AndnOpc, 2, FalseMask).addReg(RFalse).addReg(MaskReg);
1767 // emit or into dest reg
1768 unsigned OROpc = (SVT == MVT::f32) ? X86::ORPSrr : X86::ORPDrr;
1769 BuildMI(BB, OROpc, 2, RDest).addReg(TrueMask).addReg(FalseMask);
1770 return;
1771 } else {
1772 // do the test and branch thing
1773 // Get the condition into the zero flag.
1774 unsigned CondReg = SelectExpr(Cond);
1775 BuildMI(BB, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
1776
1777 // Create an iterator with which to insert the MBB for copying the false
1778 // value and the MBB to hold the PHI instruction for this SetCC.
1779 MachineBasicBlock *thisMBB = BB;
1780 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1781 ilist<MachineBasicBlock>::iterator It = BB;
1782 ++It;
1783
1784 // thisMBB:
1785 // ...
1786 // TrueVal = ...
1787 // cmpTY ccX, r1, r2
1788 // bCC sinkMBB
1789 // fallthrough --> copy0MBB
1790 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
1791 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
1792 BuildMI(BB, X86::JNE, 1).addMBB(sinkMBB);
1793 MachineFunction *F = BB->getParent();
1794 F->getBasicBlockList().insert(It, copy0MBB);
1795 F->getBasicBlockList().insert(It, sinkMBB);
1796 // Update machine-CFG edges
1797 BB->addSuccessor(copy0MBB);
1798 BB->addSuccessor(sinkMBB);
1799
1800 // copy0MBB:
1801 // %FalseValue = ...
1802 // # fallthrough to sinkMBB
1803 BB = copy0MBB;
1804 // Update machine-CFG edges
1805 BB->addSuccessor(sinkMBB);
1806
1807 // sinkMBB:
1808 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1809 // ...
1810 BB = sinkMBB;
1811 BuildMI(BB, X86::PHI, 4, RDest).addReg(RFalse)
1812 .addMBB(copy0MBB).addReg(RTrue).addMBB(thisMBB);
1813 }
1814 return;
1815 }
1816
Chris Lattner24aad1b2005-01-10 22:10:13 +00001817 unsigned Opc = 0;
1818 if (CondCode != NOT_SET) {
1819 switch (SVT) {
1820 default: assert(0 && "Cannot select this type!");
1821 case MVT::i16: Opc = CMOVTAB16[CondCode]; break;
1822 case MVT::i32: Opc = CMOVTAB32[CondCode]; break;
Chris Lattneref7ba072005-01-11 03:50:45 +00001823 case MVT::f64: Opc = CMOVTABFP[CondCode]; break;
Chris Lattner24aad1b2005-01-10 22:10:13 +00001824 }
1825 }
Nate Begemanf63be7d2005-07-06 18:59:04 +00001826
Chris Lattner24aad1b2005-01-10 22:10:13 +00001827 // Finally, if we weren't able to fold this, just emit the condition and test
1828 // it.
1829 if (CondCode == NOT_SET || Opc == 0) {
1830 // Get the condition into the zero flag.
1831 unsigned CondReg = SelectExpr(Cond);
1832 BuildMI(BB, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
1833
1834 switch (SVT) {
1835 default: assert(0 && "Cannot select this type!");
1836 case MVT::i16: Opc = X86::CMOVE16rr; break;
1837 case MVT::i32: Opc = X86::CMOVE32rr; break;
Chris Lattneref7ba072005-01-11 03:50:45 +00001838 case MVT::f64: Opc = X86::FCMOVE; break;
Chris Lattner24aad1b2005-01-10 22:10:13 +00001839 }
1840 } else {
1841 // FIXME: CMP R, 0 -> TEST R, R
Chris Lattnercb1aa8d2005-01-17 01:34:14 +00001842 EmitCMP(Cond.getOperand(0), Cond.getOperand(1), Cond.Val->hasOneUse());
Chris Lattnera3aa2e22005-01-11 03:37:59 +00001843 std::swap(RTrue, RFalse);
Chris Lattner24aad1b2005-01-10 22:10:13 +00001844 }
1845 BuildMI(BB, Opc, 2, RDest).addReg(RTrue).addReg(RFalse);
1846}
1847
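/// EmitCMP - Emit a comparison of LHS and RHS that sets the processor flags.
/// Immediate and memory operands are folded when possible; HasOneUse indicates
/// that the comparison has a single user, which makes folding a load into the
/// compare safe.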
Chris Lattnercb1aa8d2005-01-17 01:34:14 +00001848void ISel::EmitCMP(SDOperand LHS, SDOperand RHS, bool HasOneUse) {
Chris Lattner11333092005-01-11 03:11:44 +00001849 unsigned Opc;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001850 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) {
1851 Opc = 0;
Chris Lattner4ff348b2005-01-17 06:26:58 +00001852 if (HasOneUse && isFoldableLoad(LHS, RHS)) {
Chris Lattneref6806c2005-01-12 02:02:48 +00001853 switch (RHS.getValueType()) {
1854 default: break;
1855 case MVT::i1:
1856 case MVT::i8: Opc = X86::CMP8mi; break;
1857 case MVT::i16: Opc = X86::CMP16mi; break;
1858 case MVT::i32: Opc = X86::CMP32mi; break;
1859 }
1860 if (Opc) {
1861 X86AddressMode AM;
1862 EmitFoldedLoad(LHS, AM);
1863 addFullAddress(BuildMI(BB, Opc, 5), AM).addImm(CN->getValue());
1864 return;
1865 }
1866 }
1867
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001868 switch (RHS.getValueType()) {
1869 default: break;
1870 case MVT::i1:
1871 case MVT::i8: Opc = X86::CMP8ri; break;
1872 case MVT::i16: Opc = X86::CMP16ri; break;
1873 case MVT::i32: Opc = X86::CMP32ri; break;
1874 }
1875 if (Opc) {
Chris Lattner11333092005-01-11 03:11:44 +00001876 unsigned Tmp1 = SelectExpr(LHS);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001877 BuildMI(BB, Opc, 2).addReg(Tmp1).addImm(CN->getValue());
1878 return;
1879 }
Chris Lattner7f2afac2005-01-14 22:37:41 +00001880 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(RHS)) {
Nate Begemanf63be7d2005-07-06 18:59:04 +00001881 if (!X86ScalarSSE && (CN->isExactlyValue(+0.0) ||
1882 CN->isExactlyValue(-0.0))) {
Chris Lattner7f2afac2005-01-14 22:37:41 +00001883 unsigned Reg = SelectExpr(LHS);
1884 BuildMI(BB, X86::FTST, 1).addReg(Reg);
1885 BuildMI(BB, X86::FNSTSW8r, 0);
1886 BuildMI(BB, X86::SAHF, 1);
Chris Lattner7805fa42005-03-17 16:29:26 +00001887 return;
Chris Lattner7f2afac2005-01-14 22:37:41 +00001888 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001889 }
1890
Chris Lattneref6806c2005-01-12 02:02:48 +00001891 Opc = 0;
Chris Lattner4ff348b2005-01-17 06:26:58 +00001892 if (HasOneUse && isFoldableLoad(LHS, RHS)) {
Chris Lattneref6806c2005-01-12 02:02:48 +00001893 switch (RHS.getValueType()) {
1894 default: break;
1895 case MVT::i1:
1896 case MVT::i8: Opc = X86::CMP8mr; break;
1897 case MVT::i16: Opc = X86::CMP16mr; break;
1898 case MVT::i32: Opc = X86::CMP32mr; break;
1899 }
1900 if (Opc) {
1901 X86AddressMode AM;
Chris Lattner636e79a2005-01-13 05:53:16 +00001902 EmitFoldedLoad(LHS, AM);
1903 unsigned Reg = SelectExpr(RHS);
Chris Lattneref6806c2005-01-12 02:02:48 +00001904 addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(Reg);
1905 return;
1906 }
1907 }
1908
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001909 switch (LHS.getValueType()) {
1910 default: assert(0 && "Cannot compare this value!");
1911 case MVT::i1:
1912 case MVT::i8: Opc = X86::CMP8rr; break;
1913 case MVT::i16: Opc = X86::CMP16rr; break;
1914 case MVT::i32: Opc = X86::CMP32rr; break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00001915 case MVT::f32: Opc = X86::UCOMISSrr; break;
1916 case MVT::f64: Opc = X86ScalarSSE ? X86::UCOMISDrr : X86::FUCOMIr; break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001917 }
Chris Lattner11333092005-01-11 03:11:44 +00001918 unsigned Tmp1, Tmp2;
1919 if (getRegPressure(LHS) > getRegPressure(RHS)) {
1920 Tmp1 = SelectExpr(LHS);
1921 Tmp2 = SelectExpr(RHS);
1922 } else {
1923 Tmp2 = SelectExpr(RHS);
1924 Tmp1 = SelectExpr(LHS);
1925 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001926 BuildMI(BB, Opc, 2).addReg(Tmp1).addReg(Tmp2);
1927}
1928
Chris Lattnera5ade062005-01-11 21:19:59 +00001929/// isFoldableLoad - Return true if this is a load instruction that can safely
1930/// be folded into an operation that uses it.
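/// If FloatPromoteOk is true, an f32 EXTLOAD that is being promoted to f64 is
/// also considered foldable.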
Chris Lattner44129b52005-01-25 20:03:11 +00001931bool ISel::isFoldableLoad(SDOperand Op, SDOperand OtherOp, bool FloatPromoteOk){
1932 if (Op.getOpcode() == ISD::LOAD) {
1933 // FIXME: currently can't fold constant pool indexes.
1934 if (isa<ConstantPoolSDNode>(Op.getOperand(1)))
1935 return false;
1936 } else if (FloatPromoteOk && Op.getOpcode() == ISD::EXTLOAD &&
1937 cast<MVTSDNode>(Op)->getExtraValueType() == MVT::f32) {
1938 // FIXME: currently can't fold constant pool indexes.
1939 if (isa<ConstantPoolSDNode>(Op.getOperand(1)))
1940 return false;
1941 } else {
Chris Lattnera5ade062005-01-11 21:19:59 +00001942 return false;
Chris Lattner44129b52005-01-25 20:03:11 +00001943 }
Chris Lattnera5ade062005-01-11 21:19:59 +00001944
1945 // If this load has already been emitted, we clearly can't fold it.
Chris Lattner636e79a2005-01-13 05:53:16 +00001946 assert(Op.ResNo == 0 && "Not a use of the value of the load?");
1947 if (ExprMap.count(Op.getValue(1))) return false;
1948 assert(!ExprMap.count(Op.getValue(0)) && "Value in map but not token chain?");
Chris Lattner4a108662005-01-18 03:51:59 +00001949 assert(!ExprMap.count(Op.getValue(1))&&"Token lowered but value not in map?");
Chris Lattnera5ade062005-01-11 21:19:59 +00001950
Chris Lattner4ff348b2005-01-17 06:26:58 +00001951 // If there is not just one use of its value, we cannot fold.
1952 if (!Op.Val->hasNUsesOfValue(1, 0)) return false;
1953
1954 // Finally, we cannot fold the load into the operation if this would induce a
1955 // cycle in the resultant dag. To check for this, see if OtherOp (the other
1956 // operand of the operation we are folding the load into) can possibly use the
1957 // chain node defined by the load.
1958 if (OtherOp.Val && !Op.Val->hasNUsesOfValue(0, 1)) { // Has uses of chain?
1959 std::set<SDNode*> Visited;
1960 if (NodeTransitivelyUsesValue(OtherOp, Op.getValue(1), Visited))
1961 return false;
1962 }
1963 return true;
Chris Lattnera5ade062005-01-11 21:19:59 +00001964}
1965
Chris Lattner4ff348b2005-01-17 06:26:58 +00001966
Chris Lattnera5ade062005-01-11 21:19:59 +00001967/// EmitFoldedLoad - Ensure that the arguments of the load are code generated,
1968/// and compute the address being loaded into AM.
1969void ISel::EmitFoldedLoad(SDOperand Op, X86AddressMode &AM) {
1970 SDOperand Chain = Op.getOperand(0);
1971 SDOperand Address = Op.getOperand(1);
Chris Lattner98a8ba02005-01-18 01:06:26 +00001972
Chris Lattnera5ade062005-01-11 21:19:59 +00001973 if (getRegPressure(Chain) > getRegPressure(Address)) {
1974 Select(Chain);
1975 SelectAddress(Address, AM);
1976 } else {
1977 SelectAddress(Address, AM);
1978 Select(Chain);
1979 }
1980
1981 // The chain for this load is now lowered.
Chris Lattner636e79a2005-01-13 05:53:16 +00001982 assert(ExprMap.count(SDOperand(Op.Val, 1)) == 0 &&
1983 "Load emitted more than once?");
Chris Lattner4a108662005-01-18 03:51:59 +00001984 if (!ExprMap.insert(std::make_pair(Op.getValue(1), 1)).second)
Chris Lattner636e79a2005-01-13 05:53:16 +00001985 assert(0 && "Load emitted more than once!");
Chris Lattnera5ade062005-01-11 21:19:59 +00001986}
1987
Chris Lattner30ea1e92005-01-19 07:37:26 +00001988// EmitOrOpOp - Pattern match the expression (Op1|Op2), where we know that op1
1989// and op2 are i8/i16/i32 values with one use each (the or). If we can form a
1990// SHLD or SHRD, emit the instruction (generating the value into DestReg) and
1991// return true.
1992bool ISel::EmitOrOpOp(SDOperand Op1, SDOperand Op2, unsigned DestReg) {
Chris Lattner85716372005-01-19 06:18:43 +00001993 if (Op1.getOpcode() == ISD::SHL && Op2.getOpcode() == ISD::SRL) {
1994 // good!
1995 } else if (Op2.getOpcode() == ISD::SHL && Op1.getOpcode() == ISD::SRL) {
1996 std::swap(Op1, Op2); // Op1 is the SHL now.
1997 } else {
1998 return false; // No match
1999 }
2000
2001 SDOperand ShlVal = Op1.getOperand(0);
2002 SDOperand ShlAmt = Op1.getOperand(1);
2003 SDOperand ShrVal = Op2.getOperand(0);
2004 SDOperand ShrAmt = Op2.getOperand(1);
2005
Chris Lattner30ea1e92005-01-19 07:37:26 +00002006 unsigned RegSize = MVT::getSizeInBits(Op1.getValueType());
2007
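  // Three forms are matched below: shift amounts related by (RegSize - Amt),
  // which give variable rotates or double shifts, and constant shift amounts
  // that sum to RegSize, which give the immediate forms.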
Chris Lattner85716372005-01-19 06:18:43 +00002008 // Find out if ShrAmt = 32-ShlAmt or ShlAmt = 32-ShrAmt.
2009 if (ShlAmt.getOpcode() == ISD::SUB && ShlAmt.getOperand(1) == ShrAmt)
2010 if (ConstantSDNode *SubCST = dyn_cast<ConstantSDNode>(ShlAmt.getOperand(0)))
Chris Lattner4053b1e2005-01-19 08:07:05 +00002011 if (SubCST->getValue() == RegSize) {
2012 // (A >> ShrAmt) | (A << (32-ShrAmt)) ==> ROR A, ShrAmt
Chris Lattner85716372005-01-19 06:18:43 +00002013 // (A >> ShrAmt) | (B << (32-ShrAmt)) ==> SHRD A, B, ShrAmt
Chris Lattner4053b1e2005-01-19 08:07:05 +00002014 if (ShrVal == ShlVal) {
2015 unsigned Reg, ShAmt;
2016 if (getRegPressure(ShrVal) > getRegPressure(ShrAmt)) {
2017 Reg = SelectExpr(ShrVal);
2018 ShAmt = SelectExpr(ShrAmt);
2019 } else {
2020 ShAmt = SelectExpr(ShrAmt);
2021 Reg = SelectExpr(ShrVal);
2022 }
2023 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
2024 unsigned Opc = RegSize == 8 ? X86::ROR8rCL :
2025 (RegSize == 16 ? X86::ROR16rCL : X86::ROR32rCL);
2026 BuildMI(BB, Opc, 1, DestReg).addReg(Reg);
2027 return true;
2028 } else if (RegSize != 8) {
Chris Lattner85716372005-01-19 06:18:43 +00002029 unsigned AReg, BReg;
2030 if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
Chris Lattner85716372005-01-19 06:18:43 +00002031 BReg = SelectExpr(ShlVal);
Chris Lattnerc3c021b2005-01-19 17:24:34 +00002032 AReg = SelectExpr(ShrVal);
Chris Lattner85716372005-01-19 06:18:43 +00002033 } else {
Chris Lattner85716372005-01-19 06:18:43 +00002034 AReg = SelectExpr(ShrVal);
Chris Lattnerc3c021b2005-01-19 17:24:34 +00002035 BReg = SelectExpr(ShlVal);
Chris Lattner85716372005-01-19 06:18:43 +00002036 }
Chris Lattner4053b1e2005-01-19 08:07:05 +00002037 unsigned ShAmt = SelectExpr(ShrAmt);
2038 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
2039 unsigned Opc = RegSize == 16 ? X86::SHRD16rrCL : X86::SHRD32rrCL;
2040 BuildMI(BB, Opc, 2, DestReg).addReg(AReg).addReg(BReg);
Chris Lattner85716372005-01-19 06:18:43 +00002041 return true;
2042 }
2043 }
2044
Chris Lattner4053b1e2005-01-19 08:07:05 +00002045 if (ShrAmt.getOpcode() == ISD::SUB && ShrAmt.getOperand(1) == ShlAmt)
2046 if (ConstantSDNode *SubCST = dyn_cast<ConstantSDNode>(ShrAmt.getOperand(0)))
2047 if (SubCST->getValue() == RegSize) {
2048 // (A << ShlAmt) | (A >> (32-ShlAmt)) ==> ROL A, ShrAmt
2049 // (A << ShlAmt) | (B >> (32-ShlAmt)) ==> SHLD A, B, ShrAmt
2050 if (ShrVal == ShlVal) {
2051 unsigned Reg, ShAmt;
2052 if (getRegPressure(ShrVal) > getRegPressure(ShlAmt)) {
2053 Reg = SelectExpr(ShrVal);
2054 ShAmt = SelectExpr(ShlAmt);
2055 } else {
2056 ShAmt = SelectExpr(ShlAmt);
2057 Reg = SelectExpr(ShrVal);
2058 }
2059 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
2060 unsigned Opc = RegSize == 8 ? X86::ROL8rCL :
2061 (RegSize == 16 ? X86::ROL16rCL : X86::ROL32rCL);
2062 BuildMI(BB, Opc, 1, DestReg).addReg(Reg);
2063 return true;
2064 } else if (RegSize != 8) {
2065 unsigned AReg, BReg;
2066 if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
Chris Lattnerc3c021b2005-01-19 17:24:34 +00002067 AReg = SelectExpr(ShlVal);
2068 BReg = SelectExpr(ShrVal);
Chris Lattner4053b1e2005-01-19 08:07:05 +00002069 } else {
Chris Lattnerc3c021b2005-01-19 17:24:34 +00002070 BReg = SelectExpr(ShrVal);
2071 AReg = SelectExpr(ShlVal);
Chris Lattner4053b1e2005-01-19 08:07:05 +00002072 }
2073 unsigned ShAmt = SelectExpr(ShlAmt);
2074 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
2075 unsigned Opc = RegSize == 16 ? X86::SHLD16rrCL : X86::SHLD32rrCL;
2076 BuildMI(BB, Opc, 2, DestReg).addReg(AReg).addReg(BReg);
2077 return true;
2078 }
2079 }
Chris Lattner85716372005-01-19 06:18:43 +00002080
Chris Lattner4053b1e2005-01-19 08:07:05 +00002081 if (ConstantSDNode *ShrCst = dyn_cast<ConstantSDNode>(ShrAmt))
2082 if (ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(ShlAmt))
2083 if (ShrCst->getValue() < RegSize && ShlCst->getValue() < RegSize)
2084 if (ShrCst->getValue() == RegSize-ShlCst->getValue()) {
2085 // (A >> 5) | (A << 27) --> ROR A, 5
2086 // (A >> 5) | (B << 27) --> SHRD A, B, 5
2087 if (ShrVal == ShlVal) {
2088 unsigned Reg = SelectExpr(ShrVal);
2089 unsigned Opc = RegSize == 8 ? X86::ROR8ri :
2090 (RegSize == 16 ? X86::ROR16ri : X86::ROR32ri);
2091 BuildMI(BB, Opc, 2, DestReg).addReg(Reg).addImm(ShrCst->getValue());
2092 return true;
2093 } else if (RegSize != 8) {
2094 unsigned AReg, BReg;
2095 if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
Chris Lattner4053b1e2005-01-19 08:07:05 +00002096 BReg = SelectExpr(ShlVal);
Chris Lattnerc3c021b2005-01-19 17:24:34 +00002097 AReg = SelectExpr(ShrVal);
Chris Lattner4053b1e2005-01-19 08:07:05 +00002098 } else {
Chris Lattner4053b1e2005-01-19 08:07:05 +00002099 AReg = SelectExpr(ShrVal);
Chris Lattnerc3c021b2005-01-19 17:24:34 +00002100 BReg = SelectExpr(ShlVal);
Chris Lattner4053b1e2005-01-19 08:07:05 +00002101 }
2102 unsigned Opc = RegSize == 16 ? X86::SHRD16rri8 : X86::SHRD32rri8;
2103 BuildMI(BB, Opc, 3, DestReg).addReg(AReg).addReg(BReg)
2104 .addImm(ShrCst->getValue());
2105 return true;
2106 }
2107 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002108
Chris Lattner85716372005-01-19 06:18:43 +00002109 return false;
2110}
2111
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002112unsigned ISel::SelectExpr(SDOperand N) {
2113 unsigned Result;
2114 unsigned Tmp1, Tmp2, Tmp3;
2115 unsigned Opc = 0;
Chris Lattner5188ad72005-01-08 19:28:19 +00002116 SDNode *Node = N.Val;
Chris Lattnera5ade062005-01-11 21:19:59 +00002117 SDOperand Op0, Op1;
Chris Lattner5188ad72005-01-08 19:28:19 +00002118
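  // A CopyFromReg of a virtual register (or of ESP) needs no copy at all; the
  // source register can be used directly.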
Chris Lattner7f2afac2005-01-14 22:37:41 +00002119 if (Node->getOpcode() == ISD::CopyFromReg) {
Chris Lattnerc6f41812005-05-12 23:06:28 +00002120 if (MRegisterInfo::isVirtualRegister(cast<RegSDNode>(Node)->getReg()) ||
2121 cast<RegSDNode>(Node)->getReg() == X86::ESP) {
2122 // Just use the specified register as our input.
2123 return cast<RegSDNode>(Node)->getReg();
2124 }
Chris Lattner7f2afac2005-01-14 22:37:41 +00002125 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002126
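  // If this expression has already been selected, reuse the register that
  // holds its value; DAG nodes may have multiple uses.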
Chris Lattnera5ade062005-01-11 21:19:59 +00002127 unsigned &Reg = ExprMap[N];
2128 if (Reg) return Reg;
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002129
Chris Lattnerb38a7492005-04-02 04:01:14 +00002130 switch (N.getOpcode()) {
2131 default:
Chris Lattnera5ade062005-01-11 21:19:59 +00002132 Reg = Result = (N.getValueType() != MVT::Other) ?
Chris Lattnerb38a7492005-04-02 04:01:14 +00002133 MakeReg(N.getValueType()) : 1;
2134 break;
Chris Lattner239738a2005-05-14 08:48:15 +00002135 case X86ISD::TAILCALL:
2136 case X86ISD::CALL:
Chris Lattnera5ade062005-01-11 21:19:59 +00002137 // If this is a call instruction, make sure to prepare ALL of the result
2138 // values as well as the chain.
Chris Lattner239738a2005-05-14 08:48:15 +00002139 ExprMap[N.getValue(0)] = 1;
2140 if (Node->getNumValues() > 1) {
2141 Result = MakeReg(Node->getValueType(1));
2142 ExprMap[N.getValue(1)] = Result;
2143 for (unsigned i = 2, e = Node->getNumValues(); i != e; ++i)
Chris Lattnera5ade062005-01-11 21:19:59 +00002144 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
Chris Lattner239738a2005-05-14 08:48:15 +00002145 } else {
2146 Result = 1;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002147 }
Chris Lattnerb38a7492005-04-02 04:01:14 +00002148 break;
2149 case ISD::ADD_PARTS:
2150 case ISD::SUB_PARTS:
2151 case ISD::SHL_PARTS:
2152 case ISD::SRL_PARTS:
2153 case ISD::SRA_PARTS:
2154 Result = MakeReg(Node->getValueType(0));
2155 ExprMap[N.getValue(0)] = Result;
2156 for (unsigned i = 1, e = N.Val->getNumValues(); i != e; ++i)
2157 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
2158 break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002159 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002160
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002161 switch (N.getOpcode()) {
2162 default:
Chris Lattner5188ad72005-01-08 19:28:19 +00002163 Node->dump();
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002164 assert(0 && "Node not handled!\n");
Nate Begemanf63be7d2005-07-06 18:59:04 +00002165 case ISD::FP_EXTEND:
2166 assert(X86ScalarSSE && "Scalar SSE FP must be enabled to use f32");
2167 Tmp1 = SelectExpr(N.getOperand(0));
2168 BuildMI(BB, X86::CVTSS2SDrr, 1, Result).addReg(Tmp1);
2169 return Result;
Chris Lattnerc6f41812005-05-12 23:06:28 +00002170 case ISD::CopyFromReg:
2171 Select(N.getOperand(0));
2172 if (Result == 1) {
2173 Reg = Result = ExprMap[N.getValue(0)] =
2174 MakeReg(N.getValue(0).getValueType());
2175 }
2176 switch (Node->getValueType(0)) {
2177 default: assert(0 && "Cannot CopyFromReg this!");
2178 case MVT::i1:
2179 case MVT::i8:
2180 BuildMI(BB, X86::MOV8rr, 1,
2181 Result).addReg(cast<RegSDNode>(Node)->getReg());
2182 return Result;
2183 case MVT::i16:
2184 BuildMI(BB, X86::MOV16rr, 1,
2185 Result).addReg(cast<RegSDNode>(Node)->getReg());
2186 return Result;
2187 case MVT::i32:
2188 BuildMI(BB, X86::MOV32rr, 1,
2189 Result).addReg(cast<RegSDNode>(Node)->getReg());
2190 return Result;
2191 }
2192
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002193 case ISD::FrameIndex:
2194 Tmp1 = cast<FrameIndexSDNode>(N)->getIndex();
2195 addFrameReference(BuildMI(BB, X86::LEA32r, 4, Result), (int)Tmp1);
2196 return Result;
2197 case ISD::ConstantPool:
2198 Tmp1 = cast<ConstantPoolSDNode>(N)->getIndex();
2199 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 4, Result), Tmp1);
2200 return Result;
2201 case ISD::ConstantFP:
2202 ContainsFPCode = true;
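    // Only +/-0.0 and +/-1.0 are expected here; materialize the positive value
    // with FLD0/FLD1 and negate it with FCHS when needed.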
2203 Tmp1 = Result; // Intermediate Register
2204 if (cast<ConstantFPSDNode>(N)->getValue() < 0.0 ||
2205 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
2206 Tmp1 = MakeReg(MVT::f64);
2207
2208 if (cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) ||
2209 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
2210 BuildMI(BB, X86::FLD0, 0, Tmp1);
2211 else if (cast<ConstantFPSDNode>(N)->isExactlyValue(+1.0) ||
2212 cast<ConstantFPSDNode>(N)->isExactlyValue(-1.0))
2213 BuildMI(BB, X86::FLD1, 0, Tmp1);
2214 else
2215 assert(0 && "Unexpected constant!");
2216 if (Tmp1 != Result)
2217 BuildMI(BB, X86::FCHS, 1, Result).addReg(Tmp1);
2218 return Result;
2219 case ISD::Constant:
2220 switch (N.getValueType()) {
2221 default: assert(0 && "Cannot use constants of this type!");
2222 case MVT::i1:
2223 case MVT::i8: Opc = X86::MOV8ri; break;
2224 case MVT::i16: Opc = X86::MOV16ri; break;
2225 case MVT::i32: Opc = X86::MOV32ri; break;
2226 }
2227 BuildMI(BB, Opc, 1,Result).addImm(cast<ConstantSDNode>(N)->getValue());
2228 return Result;
Chris Lattner7ce7eff2005-04-01 22:46:45 +00002229 case ISD::UNDEF:
2230 if (Node->getValueType(0) == MVT::f64) {
2231 // FIXME: SHOULD TEACH STACKIFIER ABOUT UNDEF VALUES!
2232 BuildMI(BB, X86::FLD0, 0, Result);
2233 } else {
2234 BuildMI(BB, X86::IMPLICIT_DEF, 0, Result);
2235 }
2236 return Result;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002237 case ISD::GlobalAddress: {
2238 GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
2239 BuildMI(BB, X86::MOV32ri, 1, Result).addGlobalAddress(GV);
2240 return Result;
2241 }
2242 case ISD::ExternalSymbol: {
2243 const char *Sym = cast<ExternalSymbolSDNode>(N)->getSymbol();
2244 BuildMI(BB, X86::MOV32ri, 1, Result).addExternalSymbol(Sym);
2245 return Result;
2246 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002247 case ISD::ZERO_EXTEND: {
2248 int DestIs16 = N.getValueType() == MVT::i16;
2249 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
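    // The opcode tables in this case are indexed by SrcIs16+DestIs16*2:
    // 0 = i8/i1->i32, 1 = i16->i32, 2 = i8->i16.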
Chris Lattner590d8002005-01-09 18:52:44 +00002250
2251 // FIXME: This hack is here for zero extension casts from bool to i8. This
2252 // would not be needed if bools were promoted by Legalize.
2253 if (N.getValueType() == MVT::i8) {
Chris Lattnerdbba22f2005-01-11 23:33:00 +00002254 Tmp1 = SelectExpr(N.getOperand(0));
Chris Lattner590d8002005-01-09 18:52:44 +00002255 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(Tmp1);
2256 return Result;
2257 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002258
Chris Lattner4ff348b2005-01-17 06:26:58 +00002259 if (isFoldableLoad(N.getOperand(0), SDOperand())) {
Chris Lattnerdbba22f2005-01-11 23:33:00 +00002260 static const unsigned Opc[3] = {
2261 X86::MOVZX32rm8, X86::MOVZX32rm16, X86::MOVZX16rm8
2262 };
2263
2264 X86AddressMode AM;
2265 EmitFoldedLoad(N.getOperand(0), AM);
2266 addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002267
Chris Lattnerdbba22f2005-01-11 23:33:00 +00002268 return Result;
2269 }
2270
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002271 static const unsigned Opc[3] = {
2272 X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOVZX16rr8
2273 };
Chris Lattnerdbba22f2005-01-11 23:33:00 +00002274 Tmp1 = SelectExpr(N.getOperand(0));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002275 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
2276 return Result;
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002277 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002278 case ISD::SIGN_EXTEND: {
2279 int DestIs16 = N.getValueType() == MVT::i16;
2280 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
2281
Chris Lattner590d8002005-01-09 18:52:44 +00002282 // FIXME: Legalize should promote bools to i8!
2283 assert(N.getOperand(0).getValueType() != MVT::i1 &&
2284 "Sign extend from bool not implemented!");
2285
Chris Lattner4ff348b2005-01-17 06:26:58 +00002286 if (isFoldableLoad(N.getOperand(0), SDOperand())) {
Chris Lattnerdbba22f2005-01-11 23:33:00 +00002287 static const unsigned Opc[3] = {
2288 X86::MOVSX32rm8, X86::MOVSX32rm16, X86::MOVSX16rm8
2289 };
2290
2291 X86AddressMode AM;
2292 EmitFoldedLoad(N.getOperand(0), AM);
2293 addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM);
2294 return Result;
2295 }
2296
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002297 static const unsigned Opc[3] = {
2298 X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOVSX16rr8
2299 };
2300 Tmp1 = SelectExpr(N.getOperand(0));
2301 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
2302 return Result;
2303 }
2304 case ISD::TRUNCATE:
Chris Lattnerafce4302005-01-12 02:19:06 +00002305 // Fold TRUNCATE (LOAD P) into a smaller load from P.
Chris Lattner477c9312005-01-18 20:05:56 +00002306 // FIXME: This should be performed by the DAGCombiner.
Chris Lattner4ff348b2005-01-17 06:26:58 +00002307 if (isFoldableLoad(N.getOperand(0), SDOperand())) {
Chris Lattnerafce4302005-01-12 02:19:06 +00002308 switch (N.getValueType()) {
2309 default: assert(0 && "Unknown truncate!");
2310 case MVT::i1:
2311 case MVT::i8: Opc = X86::MOV8rm; break;
2312 case MVT::i16: Opc = X86::MOV16rm; break;
2313 }
2314 X86AddressMode AM;
2315 EmitFoldedLoad(N.getOperand(0), AM);
2316 addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
2317 return Result;
2318 }
2319
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002320 // Handle cast of LARGER int to SMALLER int using a move to EAX followed by
2321 // a move out of AX or AL.
2322 switch (N.getOperand(0).getValueType()) {
2323 default: assert(0 && "Unknown truncate!");
2324 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
2325 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
2326 case MVT::i32: Tmp2 = X86::EAX; Opc = X86::MOV32rr; break;
2327 }
2328 Tmp1 = SelectExpr(N.getOperand(0));
2329 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
2330
2331 switch (N.getValueType()) {
2332 default: assert(0 && "Unknown truncate!");
2333 case MVT::i1:
2334 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
2335 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
2336 }
2337 BuildMI(BB, Opc, 1, Result).addReg(Tmp2);
2338 return Result;
2339
Chris Lattner590d8002005-01-09 18:52:44 +00002340 case ISD::SINT_TO_FP:
2341 case ISD::UINT_TO_FP: {
Nate Begemanf63be7d2005-07-06 18:59:04 +00002342 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
2343 unsigned PromoteOpcode = 0;
2344
2345 // We can handle any sint to fp, and 8 and 16 uint to fp with the direct
2346 // sse conversion instructions.
2347 if (X86ScalarSSE) {
2348 MVT::ValueType SrcTy = N.getOperand(0).getValueType();
2349 MVT::ValueType DstTy = N.getValueType();
2350 switch (SrcTy) {
2351 case MVT::i1:
2352 case MVT::i8:
2353 PromoteOpcode = (N.getOpcode() == ISD::UINT_TO_FP) ?
2354 X86::MOVZX32rr8 : X86::MOVSX32rr8;
2355 break;
2356 case MVT::i16:
2357 PromoteOpcode = (N.getOpcode() == ISD::UINT_TO_FP) ?
2358 X86::MOVZX32rr16 : X86::MOVSX32rr16;
2359 break;
2360 default:
2361 assert(N.getOpcode() != ISD::UINT_TO_FP);
2362 break;
2363 }
2364 if (PromoteOpcode) {
        Tmp2 = MakeReg(MVT::i32);  // Register to hold the extended value.
2365 BuildMI(BB, PromoteOpcode, 1, Tmp2).addReg(Tmp1);
2366 Tmp1 = Tmp2;
2367 }
2368 Opc = (DstTy == MVT::f64) ? X86::CVTSI2SDrr : X86::CVTSI2SSrr;
2369 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2370 return Result;
2371 }
2372
Chris Lattner590d8002005-01-09 18:52:44 +00002373 // FIXME: Most of this grunt work should be done by legalize!
Chris Lattneref7ba072005-01-11 03:50:45 +00002374 ContainsFPCode = true;
Chris Lattner590d8002005-01-09 18:52:44 +00002375
2376 // Promote the integer to a type supported by FILD. We do this because there
2377 // are no unsigned FILD instructions, so we must promote an unsigned value to
2378 // a larger signed value, then use FILD on the larger value.
2379 //
2380 MVT::ValueType PromoteType = MVT::Other;
2381 MVT::ValueType SrcTy = N.getOperand(0).getValueType();
Chris Lattner8d4b9ed2005-07-07 17:12:53 +00002382 unsigned RealDestReg = Result;
Chris Lattner590d8002005-01-09 18:52:44 +00002383 switch (SrcTy) {
2384 case MVT::i1:
2385 case MVT::i8:
2386 // We don't have the facilities for directly loading byte sized data from
2387 // memory (even signed). Promote it to 16 bits.
2388 PromoteType = MVT::i16;
2389 PromoteOpcode = Node->getOpcode() == ISD::SINT_TO_FP ?
2390 X86::MOVSX16rr8 : X86::MOVZX16rr8;
2391 break;
2392 case MVT::i16:
2393 if (Node->getOpcode() == ISD::UINT_TO_FP) {
2394 PromoteType = MVT::i32;
2395 PromoteOpcode = X86::MOVZX32rr16;
2396 }
2397 break;
2398 default:
2399 // Don't fild into the real destination.
2400 if (Node->getOpcode() == ISD::UINT_TO_FP)
2401 Result = MakeReg(Node->getValueType(0));
2402 break;
2403 }
2404
Chris Lattner590d8002005-01-09 18:52:44 +00002405 if (PromoteType != MVT::Other) {
2406 Tmp2 = MakeReg(PromoteType);
2407 BuildMI(BB, PromoteOpcode, 1, Tmp2).addReg(Tmp1);
2408 SrcTy = PromoteType;
2409 Tmp1 = Tmp2;
2410 }
2411
2412 // Spill the integer to memory and reload it from there.
2413 unsigned Size = MVT::getSizeInBits(SrcTy)/8;
2414 MachineFunction *F = BB->getParent();
2415 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
2416
2417 switch (SrcTy) {
Chris Lattner590d8002005-01-09 18:52:44 +00002418 case MVT::i32:
2419 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
2420 FrameIdx).addReg(Tmp1);
2421 addFrameReference(BuildMI(BB, X86::FILD32m, 5, Result), FrameIdx);
2422 break;
2423 case MVT::i16:
2424 addFrameReference(BuildMI(BB, X86::MOV16mr, 5),
2425 FrameIdx).addReg(Tmp1);
2426 addFrameReference(BuildMI(BB, X86::FILD16m, 5, Result), FrameIdx);
2427 break;
2428 default: break; // No promotion required.
2429 }
Chris Lattner8d4b9ed2005-07-07 17:12:53 +00002430
2431 if (Node->getOpcode() == ISD::UINT_TO_FP && Result != RealDestReg) {
2432 // If this is a cast from uint -> double, we need to be careful if
2433 // the "sign" bit is set. If so, we don't want to make a negative number,
2434 // we want to make a positive number. Emit code to add an offset if the
2435 // sign bit is set.
2436
2437 // Compute whether the sign bit is set by shifting the reg right 31 bits.
2438 unsigned IsNeg = MakeReg(MVT::i32);
2439 BuildMI(BB, X86::SHR32ri, 2, IsNeg).addReg(Tmp1).addImm(31);
2440
2441 // Create a CP value that has the offset in one word and 0 in the other.
2442 static ConstantInt *TheOffset = ConstantUInt::get(Type::ULongTy,
2443 0x4f80000000000000ULL);
2444 unsigned CPI = F->getConstantPool()->getConstantPoolIndex(TheOffset);
2445 BuildMI(BB, X86::FADD32m, 5, RealDestReg).addReg(Result)
2446 .addConstantPoolIndex(CPI).addZImm(4).addReg(IsNeg).addSImm(0);
2447 }
2448 return RealDestReg;
Chris Lattner590d8002005-01-09 18:52:44 +00002449 }
2450 case ISD::FP_TO_SINT:
2451 case ISD::FP_TO_UINT: {
2452 // FIXME: Most of this grunt work should be done by legalize!
2453 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
2454
Nate Begemanf63be7d2005-07-06 18:59:04 +00002455 // If the target supports SSE2 and is performing FP operations in SSE regs
2456 // instead of the FP stack, then we can use the efficient CVTSS2SI and
2457 // CVTSD2SI instructions.
2458 if (ISD::FP_TO_SINT == N.getOpcode() && X86ScalarSSE) {
2459 if (MVT::f32 == N.getOperand(0).getValueType()) {
2460 BuildMI(BB, X86::CVTSS2SIrr, 1, Result).addReg(Tmp1);
2461 } else if (MVT::f64 == N.getOperand(0).getValueType()) {
2462 BuildMI(BB, X86::CVTSD2SIrr, 1, Result).addReg(Tmp1);
2463 } else {
2464 assert(0 && "Not an f32 or f64?");
2465 abort();
2466 }
2467 return Result;
2468 }
2469
Chris Lattner590d8002005-01-09 18:52:44 +00002470 // Change the floating point control register to use "round towards zero"
2471 // mode when truncating to an integer value.
2472 //
2473 MachineFunction *F = BB->getParent();
2474 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
2475 addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
2476
2477 // Load the old value of the high byte of the control word...
2478 unsigned HighPartOfCW = MakeReg(MVT::i8);
2479 addFrameReference(BuildMI(BB, X86::MOV8rm, 4, HighPartOfCW),
2480 CWFrameIdx, 1);
2481
2482 // Set the high part to be round to zero...
2483 addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
2484 CWFrameIdx, 1).addImm(12);
2485
2486 // Reload the modified control word now...
2487 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002488
Chris Lattner590d8002005-01-09 18:52:44 +00002489 // Restore the memory image of control word to original value
2490 addFrameReference(BuildMI(BB, X86::MOV8mr, 5),
2491 CWFrameIdx, 1).addReg(HighPartOfCW);
2492
2493 // We don't have the facilities for directly storing byte sized data to
2494 // memory. Promote it to 16 bits. We also must promote unsigned values to
2495 // larger classes because we only have signed FP stores.
2496 MVT::ValueType StoreClass = Node->getValueType(0);
2497 if (StoreClass == MVT::i8 || Node->getOpcode() == ISD::FP_TO_UINT)
2498 switch (StoreClass) {
Chris Lattner2afa1912005-05-09 05:33:18 +00002499 case MVT::i1:
Chris Lattner590d8002005-01-09 18:52:44 +00002500 case MVT::i8: StoreClass = MVT::i16; break;
2501 case MVT::i16: StoreClass = MVT::i32; break;
2502 case MVT::i32: StoreClass = MVT::i64; break;
Chris Lattner590d8002005-01-09 18:52:44 +00002503 default: assert(0 && "Unknown store class!");
2504 }
2505
2506 // Spill the integer to memory and reload it from there.
2507 unsigned Size = MVT::getSizeInBits(StoreClass)/8;
2508 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
2509
2510 switch (StoreClass) {
2511 default: assert(0 && "Unknown store class!");
2512 case MVT::i16:
2513 addFrameReference(BuildMI(BB, X86::FIST16m, 5), FrameIdx).addReg(Tmp1);
2514 break;
2515 case MVT::i32:
Chris Lattner25020852005-01-09 19:49:59 +00002516 addFrameReference(BuildMI(BB, X86::FIST32m, 5), FrameIdx).addReg(Tmp1);
Chris Lattner590d8002005-01-09 18:52:44 +00002517 break;
Chris Lattnera0dbf182005-05-09 18:37:02 +00002518 case MVT::i64:
2519 addFrameReference(BuildMI(BB, X86::FISTP64m, 5), FrameIdx).addReg(Tmp1);
2520 break;
    }
Chris Lattner590d8002005-01-09 18:52:44 +00002521
2522 switch (Node->getValueType(0)) {
2523 default:
2524 assert(0 && "Unknown integer type!");
Chris Lattner590d8002005-01-09 18:52:44 +00002525 case MVT::i32:
2526 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Result), FrameIdx);
2527 break;
2528 case MVT::i16:
2529 addFrameReference(BuildMI(BB, X86::MOV16rm, 4, Result), FrameIdx);
2530 break;
2531 case MVT::i8:
Chris Lattner2afa1912005-05-09 05:33:18 +00002532 case MVT::i1:
Chris Lattner590d8002005-01-09 18:52:44 +00002533 addFrameReference(BuildMI(BB, X86::MOV8rm, 4, Result), FrameIdx);
2534 break;
2535 }
2536
2537 // Reload the original control word now.
2538 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
2539 return Result;
2540 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002541 case ISD::ADD:
Chris Lattnera5ade062005-01-11 21:19:59 +00002542 Op0 = N.getOperand(0);
2543 Op1 = N.getOperand(1);
2544
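    // If either operand is a foldable load, fold it into the add as a memory
    // operand, canonicalizing so that the memory operand is Op1.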
Chris Lattner44129b52005-01-25 20:03:11 +00002545 if (isFoldableLoad(Op0, Op1, true)) {
Chris Lattnera5ade062005-01-11 21:19:59 +00002546 std::swap(Op0, Op1);
Chris Lattner4ff348b2005-01-17 06:26:58 +00002547 goto FoldAdd;
2548 }
Chris Lattnera5ade062005-01-11 21:19:59 +00002549
Chris Lattner44129b52005-01-25 20:03:11 +00002550 if (isFoldableLoad(Op1, Op0, true)) {
Chris Lattner4ff348b2005-01-17 06:26:58 +00002551 FoldAdd:
Chris Lattnera5ade062005-01-11 21:19:59 +00002552 switch (N.getValueType()) {
2553 default: assert(0 && "Cannot add this type!");
2554 case MVT::i1:
2555 case MVT::i8: Opc = X86::ADD8rm; break;
2556 case MVT::i16: Opc = X86::ADD16rm; break;
2557 case MVT::i32: Opc = X86::ADD32rm; break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00002558 case MVT::f32: Opc = X86::ADDSSrm; break;
Chris Lattner44129b52005-01-25 20:03:11 +00002559 case MVT::f64:
2560 // For F64, handle promoted load operations (from F32) as well!
Nate Begemanf63be7d2005-07-06 18:59:04 +00002561 if (X86ScalarSSE) {
2562 assert(Op1.getOpcode() == ISD::LOAD && "SSE load not promoted");
2563 Opc = X86::ADDSDrm;
2564 } else {
2565 Opc = Op1.getOpcode() == ISD::LOAD ? X86::FADD64m : X86::FADD32m;
2566 }
Chris Lattner44129b52005-01-25 20:03:11 +00002567 break;
Chris Lattnera5ade062005-01-11 21:19:59 +00002568 }
2569 X86AddressMode AM;
Chris Lattner636e79a2005-01-13 05:53:16 +00002570 EmitFoldedLoad(Op1, AM);
2571 Tmp1 = SelectExpr(Op0);
Chris Lattnera5ade062005-01-11 21:19:59 +00002572 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
2573 return Result;
2574 }
2575
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002576 // See if we can codegen this as an LEA to fold operations together.
2577 if (N.getValueType() == MVT::i32) {
Chris Lattner883c86f2005-01-18 02:25:52 +00002578 ExprMap.erase(N);
Chris Lattner98a8ba02005-01-18 01:06:26 +00002579 X86ISelAddressMode AM;
Chris Lattner883c86f2005-01-18 02:25:52 +00002580 MatchAddress(N, AM);
2581 ExprMap[N] = Result;
2582
2583 // If this is not just an add, emit the LEA. For a simple add (like
2584 // reg+reg or reg+imm), we just emit an add. It might be a good idea to
2585 // leave this as LEA, then peephole it to 'ADD' after two address elim
2586 // happens.
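      // For example (a sketch, names are illustrative), the expression
      // (add (add (shl %x, 2), %y), 12) can be selected as one instruction:
      //   leal 12(%y,%x,4), %result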
2587 if (AM.Scale != 1 || AM.BaseType == X86ISelAddressMode::FrameIndexBase||
2588 AM.GV || (AM.Base.Reg.Val && AM.IndexReg.Val && AM.Disp)) {
2589 X86AddressMode XAM = SelectAddrExprs(AM);
2590 addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), XAM);
2591 return Result;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002592 }
2593 }
Chris Lattner11333092005-01-11 03:11:44 +00002594
Chris Lattnera5ade062005-01-11 21:19:59 +00002595 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002596 Opc = 0;
2597 if (CN->getValue() == 1) { // add X, 1 -> inc X
2598 switch (N.getValueType()) {
2599 default: assert(0 && "Cannot integer add this type!");
2600 case MVT::i8: Opc = X86::INC8r; break;
2601 case MVT::i16: Opc = X86::INC16r; break;
2602 case MVT::i32: Opc = X86::INC32r; break;
2603 }
2604 } else if (CN->isAllOnesValue()) { // add X, -1 -> dec X
2605 switch (N.getValueType()) {
2606 default: assert(0 && "Cannot integer add this type!");
2607 case MVT::i8: Opc = X86::DEC8r; break;
2608 case MVT::i16: Opc = X86::DEC16r; break;
2609 case MVT::i32: Opc = X86::DEC32r; break;
2610 }
2611 }
2612
2613 if (Opc) {
Chris Lattnera5ade062005-01-11 21:19:59 +00002614 Tmp1 = SelectExpr(Op0);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002615 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2616 return Result;
2617 }
2618
2619 switch (N.getValueType()) {
2620 default: assert(0 && "Cannot add this type!");
2621 case MVT::i8: Opc = X86::ADD8ri; break;
2622 case MVT::i16: Opc = X86::ADD16ri; break;
2623 case MVT::i32: Opc = X86::ADD32ri; break;
2624 }
2625 if (Opc) {
Chris Lattnera5ade062005-01-11 21:19:59 +00002626 Tmp1 = SelectExpr(Op0);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002627 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
2628 return Result;
2629 }
2630 }
2631
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002632 switch (N.getValueType()) {
2633 default: assert(0 && "Cannot add this type!");
2634 case MVT::i8: Opc = X86::ADD8rr; break;
2635 case MVT::i16: Opc = X86::ADD16rr; break;
2636 case MVT::i32: Opc = X86::ADD32rr; break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00002637 case MVT::f32: Opc = X86::ADDSSrr; break;
2638 case MVT::f64: Opc = X86ScalarSSE ? X86::ADDSDrr : X86::FpADD; break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002639 }
Chris Lattner11333092005-01-11 03:11:44 +00002640
Chris Lattnera5ade062005-01-11 21:19:59 +00002641 if (getRegPressure(Op0) > getRegPressure(Op1)) {
2642 Tmp1 = SelectExpr(Op0);
2643 Tmp2 = SelectExpr(Op1);
Chris Lattner11333092005-01-11 03:11:44 +00002644 } else {
Chris Lattnera5ade062005-01-11 21:19:59 +00002645 Tmp2 = SelectExpr(Op1);
2646 Tmp1 = SelectExpr(Op0);
Chris Lattner11333092005-01-11 03:11:44 +00002647 }
2648
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002649 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
2650 return Result;
Chris Lattnerb7edaa12005-04-02 05:30:17 +00002651
Nate Begemanf63be7d2005-07-06 18:59:04 +00002652 case ISD::FSQRT:
2653 Tmp1 = SelectExpr(Node->getOperand(0));
2654 if (X86ScalarSSE) {
2655 Opc = (N.getValueType() == MVT::f32) ? X86::SQRTSSrr : X86::SQRTSDrr;
2656 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2657 } else {
2658 BuildMI(BB, X86::FSQRT, 1, Result).addReg(Tmp1);
2659 }
2660 return Result;
2661
2662 // FIXME:
2663 // Once we can spill 16 byte constants into the constant pool, we can
2664 // implement SSE equivalents of FABS and FCHS.
Chris Lattnerb7edaa12005-04-02 05:30:17 +00002665 case ISD::FABS:
Chris Lattnerb7edaa12005-04-02 05:30:17 +00002666 case ISD::FNEG:
Chris Lattnerc5dcb532005-04-30 04:25:35 +00002667 case ISD::FSIN:
2668 case ISD::FCOS:
Chris Lattner2c56e8a2005-04-28 22:07:18 +00002669 assert(N.getValueType()==MVT::f64 && "Illegal type for this operation");
Chris Lattnerb7edaa12005-04-02 05:30:17 +00002670 Tmp1 = SelectExpr(Node->getOperand(0));
Chris Lattner2c56e8a2005-04-28 22:07:18 +00002671 switch (N.getOpcode()) {
2672 default: assert(0 && "Unreachable!");
2673 case ISD::FABS: BuildMI(BB, X86::FABS, 1, Result).addReg(Tmp1); break;
2674 case ISD::FNEG: BuildMI(BB, X86::FCHS, 1, Result).addReg(Tmp1); break;
Chris Lattnerc5dcb532005-04-30 04:25:35 +00002675 case ISD::FSIN: BuildMI(BB, X86::FSIN, 1, Result).addReg(Tmp1); break;
2676 case ISD::FCOS: BuildMI(BB, X86::FCOS, 1, Result).addReg(Tmp1); break;
Chris Lattner2c56e8a2005-04-28 22:07:18 +00002677 }
Chris Lattnerb7edaa12005-04-02 05:30:17 +00002678 return Result;
2679
Chris Lattner8db0af12005-04-06 04:21:07 +00002680 case ISD::MULHU:
2681 switch (N.getValueType()) {
2682 default: assert(0 && "Unsupported VT!");
2683 case MVT::i8: Tmp2 = X86::MUL8r; break;
2684 case MVT::i16: Tmp2 = X86::MUL16r; break;
2685 case MVT::i32: Tmp2 = X86::MUL32r; break;
2686 }
2687 // FALL THROUGH
2688 case ISD::MULHS: {
2689 unsigned MovOpc, LowReg, HiReg;
2690 switch (N.getValueType()) {
2691 default: assert(0 && "Unsupported VT!");
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002692 case MVT::i8:
Chris Lattner8db0af12005-04-06 04:21:07 +00002693 MovOpc = X86::MOV8rr;
2694 LowReg = X86::AL;
2695 HiReg = X86::AH;
2696 Opc = X86::IMUL8r;
2697 break;
2698 case MVT::i16:
2699 MovOpc = X86::MOV16rr;
2700 LowReg = X86::AX;
2701 HiReg = X86::DX;
2702 Opc = X86::IMUL16r;
2703 break;
2704 case MVT::i32:
2705 MovOpc = X86::MOV32rr;
2706 LowReg = X86::EAX;
2707 HiReg = X86::EDX;
2708 Opc = X86::IMUL32r;
2709 break;
2710 }
2711 if (Node->getOpcode() != ISD::MULHS)
2712 Opc = Tmp2; // Get the MULHU opcode.
2713
2714 Op0 = Node->getOperand(0);
2715 Op1 = Node->getOperand(1);
2716 if (getRegPressure(Op0) > getRegPressure(Op1)) {
2717 Tmp1 = SelectExpr(Op0);
2718 Tmp2 = SelectExpr(Op1);
2719 } else {
2720 Tmp2 = SelectExpr(Op1);
2721 Tmp1 = SelectExpr(Op0);
2722 }
2723
2724 // FIXME: Implement folding of loads into the memory operands here!
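    // What gets emitted here for MULHS:i32 looks roughly like (a sketch,
    // register names are illustrative):
    //   movl  %a, %eax
    //   imull %b              ; EDX:EAX = the full 64-bit signed product
    //   movl  %edx, %result   ; keep only the high 32 bits
    // MULHU is identical except that the one-operand unsigned 'mul' is used.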
2725 BuildMI(BB, MovOpc, 1, LowReg).addReg(Tmp1);
2726 BuildMI(BB, Opc, 1).addReg(Tmp2);
2727 BuildMI(BB, MovOpc, 1, Result).addReg(HiReg);
2728 return Result;
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002729 }
Chris Lattner8db0af12005-04-06 04:21:07 +00002730
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002731 case ISD::SUB:
Chris Lattnera5ade062005-01-11 21:19:59 +00002732 case ISD::MUL:
2733 case ISD::AND:
2734 case ISD::OR:
Chris Lattnera56cea42005-01-12 04:23:22 +00002735 case ISD::XOR: {
Chris Lattnera5ade062005-01-11 21:19:59 +00002736 static const unsigned SUBTab[] = {
2737 X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, 0,
2738 X86::SUB8rm, X86::SUB16rm, X86::SUB32rm, X86::FSUB32m, X86::FSUB64m,
2739 X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB , X86::FpSUB,
2740 };
Nate Begemanf63be7d2005-07-06 18:59:04 +00002741 static const unsigned SSE_SUBTab[] = {
2742 X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, 0,
2743 X86::SUB8rm, X86::SUB16rm, X86::SUB32rm, X86::SUBSSrm, X86::SUBSDrm,
2744 X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::SUBSSrr, X86::SUBSDrr,
2745 };
Chris Lattnera5ade062005-01-11 21:19:59 +00002746 static const unsigned MULTab[] = {
2747 0, X86::IMUL16rri, X86::IMUL32rri, 0, 0,
2748 0, X86::IMUL16rm , X86::IMUL32rm, X86::FMUL32m, X86::FMUL64m,
2749 0, X86::IMUL16rr , X86::IMUL32rr, X86::FpMUL , X86::FpMUL,
2750 };
Nate Begemanf63be7d2005-07-06 18:59:04 +00002751 static const unsigned SSE_MULTab[] = {
2752 0, X86::IMUL16rri, X86::IMUL32rri, 0, 0,
2753 0, X86::IMUL16rm , X86::IMUL32rm, X86::MULSSrm, X86::MULSDrm,
2754 0, X86::IMUL16rr , X86::IMUL32rr, X86::MULSSrr, X86::MULSDrr,
2755 };
Chris Lattnera5ade062005-01-11 21:19:59 +00002756 static const unsigned ANDTab[] = {
2757 X86::AND8ri, X86::AND16ri, X86::AND32ri, 0, 0,
2758 X86::AND8rm, X86::AND16rm, X86::AND32rm, 0, 0,
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002759 X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, 0,
Chris Lattnera5ade062005-01-11 21:19:59 +00002760 };
2761 static const unsigned ORTab[] = {
2762 X86::OR8ri, X86::OR16ri, X86::OR32ri, 0, 0,
2763 X86::OR8rm, X86::OR16rm, X86::OR32rm, 0, 0,
2764 X86::OR8rr, X86::OR16rr, X86::OR32rr, 0, 0,
2765 };
2766 static const unsigned XORTab[] = {
2767 X86::XOR8ri, X86::XOR16ri, X86::XOR32ri, 0, 0,
2768 X86::XOR8rm, X86::XOR16rm, X86::XOR32rm, 0, 0,
2769 X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0, 0,
2770 };
2771
2772 Op0 = Node->getOperand(0);
2773 Op1 = Node->getOperand(1);
2774
Chris Lattner30ea1e92005-01-19 07:37:26 +00002775 if (Node->getOpcode() == ISD::OR && Op0.hasOneUse() && Op1.hasOneUse())
2776 if (EmitOrOpOp(Op0, Op1, Result)) // Match SHLD, SHRD, and rotates.
Chris Lattner85716372005-01-19 06:18:43 +00002777 return Result;
2778
2779 if (Node->getOpcode() == ISD::SUB)
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002780 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(0)))
2781 if (CN->isNullValue()) { // 0 - N -> neg N
2782 switch (N.getValueType()) {
2783 default: assert(0 && "Cannot sub this type!");
2784 case MVT::i1:
2785 case MVT::i8: Opc = X86::NEG8r; break;
2786 case MVT::i16: Opc = X86::NEG16r; break;
2787 case MVT::i32: Opc = X86::NEG32r; break;
2788 }
2789 Tmp1 = SelectExpr(N.getOperand(1));
2790 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2791 return Result;
2792 }
2793
Chris Lattnera5ade062005-01-11 21:19:59 +00002794 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
2795 if (CN->isAllOnesValue() && Node->getOpcode() == ISD::XOR) {
Chris Lattnerc98279d2005-01-17 00:23:16 +00002796 Opc = 0;
Chris Lattnerd4dab922005-01-11 04:31:30 +00002797 switch (N.getValueType()) {
2798 default: assert(0 && "Cannot invert this type!");
Chris Lattnerc98279d2005-01-17 00:23:16 +00002799 case MVT::i1: break; // Not supported, don't invert upper bits!
Chris Lattnerd4dab922005-01-11 04:31:30 +00002800 case MVT::i8: Opc = X86::NOT8r; break;
2801 case MVT::i16: Opc = X86::NOT16r; break;
2802 case MVT::i32: Opc = X86::NOT32r; break;
2803 }
Chris Lattnerc98279d2005-01-17 00:23:16 +00002804 if (Opc) {
2805 Tmp1 = SelectExpr(Op0);
2806 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2807 return Result;
2808 }
Chris Lattnerd4dab922005-01-11 04:31:30 +00002809 }
2810
Chris Lattner2a4e5082005-01-17 06:48:02 +00002811 // Fold common multiplies into LEA instructions.
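        // e.g. (a sketch, names are illustrative) x*5 becomes a single
        // address computation:
        //   leal (%x,%x,4), %result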
2812 if (Node->getOpcode() == ISD::MUL && N.getValueType() == MVT::i32) {
2813 switch ((int)CN->getValue()) {
2814 default: break;
2815 case 3:
2816 case 5:
2817 case 9:
Chris Lattner2a4e5082005-01-17 06:48:02 +00002818 // Remove N from exprmap so SelectAddress doesn't get confused.
2819 ExprMap.erase(N);
Chris Lattner98a8ba02005-01-18 01:06:26 +00002820 X86AddressMode AM;
Chris Lattner2a4e5082005-01-17 06:48:02 +00002821 SelectAddress(N, AM);
2822 // Restore it to the map.
2823 ExprMap[N] = Result;
2824 addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), AM);
2825 return Result;
2826 }
2827 }
2828
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002829 switch (N.getValueType()) {
Chris Lattnerd4dab922005-01-11 04:31:30 +00002830 default: assert(0 && "Cannot operate on this type!");
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002831 case MVT::i1:
Chris Lattnera5ade062005-01-11 21:19:59 +00002832 case MVT::i8: Opc = 0; break;
2833 case MVT::i16: Opc = 1; break;
2834 case MVT::i32: Opc = 2; break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002835 }
Chris Lattnera5ade062005-01-11 21:19:59 +00002836 switch (Node->getOpcode()) {
2837 default: assert(0 && "Unreachable!");
Nate Begemanf63be7d2005-07-06 18:59:04 +00002838 case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
2839 case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
Chris Lattnera5ade062005-01-11 21:19:59 +00002840 case ISD::AND: Opc = ANDTab[Opc]; break;
2841 case ISD::OR: Opc = ORTab[Opc]; break;
2842 case ISD::XOR: Opc = XORTab[Opc]; break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002843 }
Chris Lattnera5ade062005-01-11 21:19:59 +00002844 if (Opc) { // Can't fold MUL:i8 R, imm
2845 Tmp1 = SelectExpr(Op0);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002846 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
2847 return Result;
2848 }
2849 }
Chris Lattner11333092005-01-11 03:11:44 +00002850
Chris Lattner44129b52005-01-25 20:03:11 +00002851 if (isFoldableLoad(Op0, Op1, true))
Chris Lattnera5ade062005-01-11 21:19:59 +00002852 if (Node->getOpcode() != ISD::SUB) {
2853 std::swap(Op0, Op1);
Chris Lattner4ff348b2005-01-17 06:26:58 +00002854 goto FoldOps;
Chris Lattnera5ade062005-01-11 21:19:59 +00002855 } else {
Chris Lattner44129b52005-01-25 20:03:11 +00002856 // For FP, emit a 'reverse' subtract, with a memory operand.
Nate Begemanf63be7d2005-07-06 18:59:04 +00002857 if (N.getValueType() == MVT::f64 && !X86ScalarSSE) {
Chris Lattner44129b52005-01-25 20:03:11 +00002858 if (Op0.getOpcode() == ISD::EXTLOAD)
2859 Opc = X86::FSUBR32m;
2860 else
2861 Opc = X86::FSUBR64m;
2862
Chris Lattnera5ade062005-01-11 21:19:59 +00002863 X86AddressMode AM;
Chris Lattner636e79a2005-01-13 05:53:16 +00002864 EmitFoldedLoad(Op0, AM);
2865 Tmp1 = SelectExpr(Op1);
Chris Lattnera5ade062005-01-11 21:19:59 +00002866 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
2867 return Result;
2868 }
2869 }
2870
Chris Lattner44129b52005-01-25 20:03:11 +00002871 if (isFoldableLoad(Op1, Op0, true)) {
Chris Lattner4ff348b2005-01-17 06:26:58 +00002872 FoldOps:
Chris Lattnera5ade062005-01-11 21:19:59 +00002873 switch (N.getValueType()) {
2874 default: assert(0 && "Cannot operate on this type!");
2875 case MVT::i1:
2876 case MVT::i8: Opc = 5; break;
2877 case MVT::i16: Opc = 6; break;
2878 case MVT::i32: Opc = 7; break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00002879 case MVT::f32: Opc = 8; break;
Chris Lattner44129b52005-01-25 20:03:11 +00002880 // For F64, handle promoted load operations (from F32) as well!
Nate Begemanf63be7d2005-07-06 18:59:04 +00002881 case MVT::f64:
2882 assert((!X86ScalarSSE || Op1.getOpcode() == ISD::LOAD) &&
2883 "SSE load should have been promoted");
2884 Opc = Op1.getOpcode() == ISD::LOAD ? 9 : 8; break;
Chris Lattnera5ade062005-01-11 21:19:59 +00002885 }
2886 switch (Node->getOpcode()) {
2887 default: assert(0 && "Unreachable!");
Nate Begemanf63be7d2005-07-06 18:59:04 +00002888 case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
2889 case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
Chris Lattnera5ade062005-01-11 21:19:59 +00002890 case ISD::AND: Opc = ANDTab[Opc]; break;
2891 case ISD::OR: Opc = ORTab[Opc]; break;
2892 case ISD::XOR: Opc = XORTab[Opc]; break;
2893 }
2894
2895 X86AddressMode AM;
Chris Lattner636e79a2005-01-13 05:53:16 +00002896 EmitFoldedLoad(Op1, AM);
2897 Tmp1 = SelectExpr(Op0);
Chris Lattnera5ade062005-01-11 21:19:59 +00002898 if (Opc) {
2899 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
2900 } else {
2901 assert(Node->getOpcode() == ISD::MUL &&
2902 N.getValueType() == MVT::i8 && "Unexpected situation!");
2903 // Must use the MUL instruction, which forces use of AL.
2904 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
2905 addFullAddress(BuildMI(BB, X86::MUL8m, 1), AM);
2906 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
2907 }
2908 return Result;
Chris Lattner11333092005-01-11 03:11:44 +00002909 }
Chris Lattnera5ade062005-01-11 21:19:59 +00002910
2911 if (getRegPressure(Op0) > getRegPressure(Op1)) {
2912 Tmp1 = SelectExpr(Op0);
2913 Tmp2 = SelectExpr(Op1);
2914 } else {
2915 Tmp2 = SelectExpr(Op1);
2916 Tmp1 = SelectExpr(Op0);
2917 }
2918
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002919 switch (N.getValueType()) {
2920 default: assert(0 && "Cannot operate on this type!");
Chris Lattnera5ade062005-01-11 21:19:59 +00002921 case MVT::i1:
2922 case MVT::i8: Opc = 10; break;
2923 case MVT::i16: Opc = 11; break;
2924 case MVT::i32: Opc = 12; break;
2925 case MVT::f32: Opc = 13; break;
2926 case MVT::f64: Opc = 14; break;
2927 }
2928 switch (Node->getOpcode()) {
2929 default: assert(0 && "Unreachable!");
Nate Begemanf63be7d2005-07-06 18:59:04 +00002930 case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
2931 case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
Chris Lattnera5ade062005-01-11 21:19:59 +00002932 case ISD::AND: Opc = ANDTab[Opc]; break;
2933 case ISD::OR: Opc = ORTab[Opc]; break;
2934 case ISD::XOR: Opc = XORTab[Opc]; break;
2935 }
2936 if (Opc) {
2937 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
2938 } else {
2939 assert(Node->getOpcode() == ISD::MUL &&
2940 N.getValueType() == MVT::i8 && "Unexpected situation!");
Chris Lattnera13d3232005-01-10 20:55:48 +00002941 // Must use the MUL instruction, which forces use of AL.
2942 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
2943 BuildMI(BB, X86::MUL8r, 1).addReg(Tmp2);
2944 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002945 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002946 return Result;
Chris Lattnera56cea42005-01-12 04:23:22 +00002947 }
Chris Lattner19ad0622005-01-20 18:53:00 +00002948 case ISD::ADD_PARTS:
2949 case ISD::SUB_PARTS: {
2950 assert(N.getNumOperands() == 4 && N.getValueType() == MVT::i32 &&
2951 "Not an i64 add/sub!");
2952 // Emit all of the operands.
2953 std::vector<unsigned> InVals;
2954 for (unsigned i = 0, e = N.getNumOperands(); i != e; ++i)
2955 InVals.push_back(SelectExpr(N.getOperand(i)));
2956 if (N.getOpcode() == ISD::ADD_PARTS) {
2957 BuildMI(BB, X86::ADD32rr, 2, Result).addReg(InVals[0]).addReg(InVals[2]);
2958 BuildMI(BB, X86::ADC32rr,2,Result+1).addReg(InVals[1]).addReg(InVals[3]);
2959 } else {
2960 BuildMI(BB, X86::SUB32rr, 2, Result).addReg(InVals[0]).addReg(InVals[2]);
2961 BuildMI(BB, X86::SBB32rr, 2,Result+1).addReg(InVals[1]).addReg(InVals[3]);
2962 }
2963 return Result+N.ResNo;
2964 }
2965
Chris Lattnerb38a7492005-04-02 04:01:14 +00002966 case ISD::SHL_PARTS:
2967 case ISD::SRA_PARTS:
2968 case ISD::SRL_PARTS: {
2969 assert(N.getNumOperands() == 3 && N.getValueType() == MVT::i32 &&
2970 "Not an i64 shift!");
2971 unsigned ShiftOpLo = SelectExpr(N.getOperand(0));
2972 unsigned ShiftOpHi = SelectExpr(N.getOperand(1));
2973 unsigned TmpReg = MakeReg(MVT::i32);
2974 if (N.getOpcode() == ISD::SRA_PARTS) {
2975 // If this is a SHR of a Long, then we need to do funny sign extension
2976 // stuff. TmpReg gets the value to use as the high-part if we are
2977 // shifting more than 32 bits.
2978 BuildMI(BB, X86::SAR32ri, 2, TmpReg).addReg(ShiftOpHi).addImm(31);
2979 } else {
2980 // Other shifts use a fixed zero value if the shift is more than 32 bits.
2981 BuildMI(BB, X86::MOV32ri, 1, TmpReg).addImm(0);
2982 }
2983
2984 // Initialize CL with the shift amount.
2985 unsigned ShiftAmountReg = SelectExpr(N.getOperand(2));
2986 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
2987
2988 unsigned TmpReg2 = MakeReg(MVT::i32);
2989 unsigned TmpReg3 = MakeReg(MVT::i32);
2990 if (N.getOpcode() == ISD::SHL_PARTS) {
2991 // TmpReg2 = shld inHi, inLo
2992 BuildMI(BB, X86::SHLD32rrCL, 2,TmpReg2).addReg(ShiftOpHi)
2993 .addReg(ShiftOpLo);
2994 // TmpReg3 = shl inLo, CL
2995 BuildMI(BB, X86::SHL32rCL, 1, TmpReg3).addReg(ShiftOpLo);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002996
Chris Lattnerb38a7492005-04-02 04:01:14 +00002997 // Set the flags to indicate whether the shift was by more than 32 bits.
2998 BuildMI(BB, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002999
Chris Lattnerb38a7492005-04-02 04:01:14 +00003000 // DestHi = (>32) ? TmpReg3 : TmpReg2;
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003001 BuildMI(BB, X86::CMOVNE32rr, 2,
Chris Lattnerb38a7492005-04-02 04:01:14 +00003002 Result+1).addReg(TmpReg2).addReg(TmpReg3);
3003 // DestLo = (>32) ? TmpReg : TmpReg3;
3004 BuildMI(BB, X86::CMOVNE32rr, 2,
3005 Result).addReg(TmpReg3).addReg(TmpReg);
3006 } else {
3007 // TmpReg2 = shrd inLo, inHi
3008 BuildMI(BB, X86::SHRD32rrCL,2,TmpReg2).addReg(ShiftOpLo)
3009 .addReg(ShiftOpHi);
3010 // TmpReg3 = s[ah]r inHi, CL
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003011 BuildMI(BB, N.getOpcode() == ISD::SRA_PARTS ? X86::SAR32rCL
Chris Lattnerb38a7492005-04-02 04:01:14 +00003012 : X86::SHR32rCL, 1, TmpReg3)
3013 .addReg(ShiftOpHi);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003014
Chris Lattnerb38a7492005-04-02 04:01:14 +00003015 // Set the flags to indicate whether the shift was by more than 32 bits.
3016 BuildMI(BB, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003017
Chris Lattnerb38a7492005-04-02 04:01:14 +00003018 // DestLo = (>32) ? TmpReg3 : TmpReg2;
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003019 BuildMI(BB, X86::CMOVNE32rr, 2,
Chris Lattnerb38a7492005-04-02 04:01:14 +00003020 Result).addReg(TmpReg2).addReg(TmpReg3);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003021
Chris Lattnerb38a7492005-04-02 04:01:14 +00003022 // DestHi = (>32) ? TmpReg : TmpReg3;
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003023 BuildMI(BB, X86::CMOVNE32rr, 2,
Chris Lattnerb38a7492005-04-02 04:01:14 +00003024 Result+1).addReg(TmpReg3).addReg(TmpReg);
3025 }
3026 return Result+N.ResNo;
3027 }
3028
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003029 case ISD::SELECT:
Chris Lattnerda2ce112005-01-16 07:34:08 +00003030 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
3031 Tmp2 = SelectExpr(N.getOperand(1));
3032 Tmp3 = SelectExpr(N.getOperand(2));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003033 } else {
Chris Lattnerda2ce112005-01-16 07:34:08 +00003034 Tmp3 = SelectExpr(N.getOperand(2));
3035 Tmp2 = SelectExpr(N.getOperand(1));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003036 }
Chris Lattnerda2ce112005-01-16 07:34:08 +00003037 EmitSelectCC(N.getOperand(0), N.getValueType(), Tmp2, Tmp3, Result);
3038 return Result;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003039
3040 case ISD::SDIV:
3041 case ISD::UDIV:
3042 case ISD::SREM:
3043 case ISD::UREM: {
Chris Lattnerda2ce112005-01-16 07:34:08 +00003044 assert((N.getOpcode() != ISD::SREM || MVT::isInteger(N.getValueType())) &&
3045 "We don't support this operator!");
3046
Chris Lattner5bf26862005-04-13 03:29:53 +00003047 if (N.getOpcode() == ISD::SDIV) {
Chris Lattner3576c842005-01-25 20:35:10 +00003048 // We can fold loads into FpDIVs, but not really into any others.
Nate Begemanb8aa3ac2005-07-07 06:32:01 +00003049 if (N.getValueType() == MVT::f64 && !X86ScalarSSE) {
Chris Lattner3576c842005-01-25 20:35:10 +00003050 // Check for reversed and unreversed DIV.
3051 if (isFoldableLoad(N.getOperand(0), N.getOperand(1), true)) {
3052 if (N.getOperand(0).getOpcode() == ISD::EXTLOAD)
3053 Opc = X86::FDIVR32m;
3054 else
3055 Opc = X86::FDIVR64m;
3056 X86AddressMode AM;
3057 EmitFoldedLoad(N.getOperand(0), AM);
3058 Tmp1 = SelectExpr(N.getOperand(1));
3059 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
3060 return Result;
3061 } else if (isFoldableLoad(N.getOperand(1), N.getOperand(0), true) &&
3062 N.getOperand(1).getOpcode() == ISD::LOAD) {
3063 if (N.getOperand(1).getOpcode() == ISD::EXTLOAD)
3064 Opc = X86::FDIV32m;
3065 else
3066 Opc = X86::FDIV64m;
3067 X86AddressMode AM;
3068 EmitFoldedLoad(N.getOperand(1), AM);
3069 Tmp1 = SelectExpr(N.getOperand(0));
3070 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
3071 return Result;
3072 }
3073 }
3074
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003075 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3076 // FIXME: These special cases should be handled by the lowering impl!
3077 unsigned RHS = CN->getValue();
3078 bool isNeg = false;
3079 if ((int)RHS < 0) {
3080 isNeg = true;
3081 RHS = -RHS;
3082 }
3083 if (RHS && (RHS & (RHS-1)) == 0) { // Signed division by power of 2?
3084 unsigned Log = log2(RHS);
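          // A sketch of the sequence built below for a 32-bit sdiv by 8
          // (Log == 3); temporaries are illustrative:
          //   t1 = X sar (Log-1)      ; here: X sar 2
          //   t2 = t1 lshr (32-Log)   ; = 7 when X is negative, 0 otherwise
          //   t3 = X + t2             ; bias negative values toward zero
          //   result = t3 sar Log     ; = X sdiv 8
          // A final 'neg' is appended when the original divisor was negative.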
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003085 unsigned SAROpc, SHROpc, ADDOpc, NEGOpc;
3086 switch (N.getValueType()) {
3087 default: assert(0 && "Unknown type to signed divide!");
3088 case MVT::i8:
3089 SAROpc = X86::SAR8ri;
3090 SHROpc = X86::SHR8ri;
3091 ADDOpc = X86::ADD8rr;
3092 NEGOpc = X86::NEG8r;
3093 break;
3094 case MVT::i16:
3095 SAROpc = X86::SAR16ri;
3096 SHROpc = X86::SHR16ri;
3097 ADDOpc = X86::ADD16rr;
3098 NEGOpc = X86::NEG16r;
3099 break;
3100 case MVT::i32:
3101 SAROpc = X86::SAR32ri;
3102 SHROpc = X86::SHR32ri;
3103 ADDOpc = X86::ADD32rr;
3104 NEGOpc = X86::NEG32r;
3105 break;
3106 }
Chris Lattnera96e5772005-05-13 21:48:20 +00003107 unsigned RegSize = MVT::getSizeInBits(N.getValueType());
Chris Lattner11333092005-01-11 03:11:44 +00003108 Tmp1 = SelectExpr(N.getOperand(0));
Chris Lattnerca96c822005-05-13 21:50:27 +00003109 unsigned TmpReg;
3110 if (Log != 1) {
3111 TmpReg = MakeReg(N.getValueType());
3112 BuildMI(BB, SAROpc, 2, TmpReg).addReg(Tmp1).addImm(Log-1);
3113 } else {
3114 TmpReg = Tmp1;
3115 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003116 unsigned TmpReg2 = MakeReg(N.getValueType());
Chris Lattnera96e5772005-05-13 21:48:20 +00003117 BuildMI(BB, SHROpc, 2, TmpReg2).addReg(TmpReg).addImm(RegSize-Log);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003118 unsigned TmpReg3 = MakeReg(N.getValueType());
3119 BuildMI(BB, ADDOpc, 2, TmpReg3).addReg(Tmp1).addReg(TmpReg2);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003120
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003121 unsigned TmpReg4 = isNeg ? MakeReg(N.getValueType()) : Result;
3122 BuildMI(BB, SAROpc, 2, TmpReg4).addReg(TmpReg3).addImm(Log);
3123 if (isNeg)
3124 BuildMI(BB, NEGOpc, 1, Result).addReg(TmpReg4);
3125 return Result;
3126 }
3127 }
Chris Lattner5bf26862005-04-13 03:29:53 +00003128 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003129
Chris Lattner11333092005-01-11 03:11:44 +00003130 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3131 Tmp1 = SelectExpr(N.getOperand(0));
3132 Tmp2 = SelectExpr(N.getOperand(1));
3133 } else {
3134 Tmp2 = SelectExpr(N.getOperand(1));
3135 Tmp1 = SelectExpr(N.getOperand(0));
3136 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003137
3138 bool isSigned = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::SREM;
3139 bool isDiv = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::UDIV;
3140 unsigned LoReg, HiReg, DivOpcode, MovOpcode, ClrOpcode, SExtOpcode;
3141 switch (N.getValueType()) {
3142 default: assert(0 && "Cannot sdiv this type!");
3143 case MVT::i8:
3144 DivOpcode = isSigned ? X86::IDIV8r : X86::DIV8r;
3145 LoReg = X86::AL;
3146 HiReg = X86::AH;
3147 MovOpcode = X86::MOV8rr;
3148 ClrOpcode = X86::MOV8ri;
3149 SExtOpcode = X86::CBW;
3150 break;
3151 case MVT::i16:
3152 DivOpcode = isSigned ? X86::IDIV16r : X86::DIV16r;
3153 LoReg = X86::AX;
3154 HiReg = X86::DX;
3155 MovOpcode = X86::MOV16rr;
3156 ClrOpcode = X86::MOV16ri;
3157 SExtOpcode = X86::CWD;
3158 break;
3159 case MVT::i32:
3160 DivOpcode = isSigned ? X86::IDIV32r : X86::DIV32r;
Chris Lattner42928302005-01-12 03:16:09 +00003161 LoReg = X86::EAX;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003162 HiReg = X86::EDX;
3163 MovOpcode = X86::MOV32rr;
3164 ClrOpcode = X86::MOV32ri;
3165 SExtOpcode = X86::CDQ;
3166 break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00003167 case MVT::f32:
3168 BuildMI(BB, X86::DIVSSrr, 2, Result).addReg(Tmp1).addReg(Tmp2);
3169 return Result;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003170 case MVT::f64:
Nate Begemanf63be7d2005-07-06 18:59:04 +00003171 Opc = X86ScalarSSE ? X86::DIVSDrr : X86::FpDIV;
3172 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003173 return Result;
3174 }
3175
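    // The emitted code for the general case looks roughly like this for a
    // 32-bit sdiv/srem (a sketch, register names are illustrative):
    //   movl %a, %eax
    //   cdq                    ; sign-extend EAX into EDX (mov $0 if unsigned)
    //   idivl %b               ; EAX = quotient, EDX = remainder
    //   movl %eax, %result     ; or %edx for SREM/UREM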
3176 // Set up the low part.
3177 BuildMI(BB, MovOpcode, 1, LoReg).addReg(Tmp1);
3178
3179 if (isSigned) {
3180 // Sign extend the low part into the high part.
3181 BuildMI(BB, SExtOpcode, 0);
3182 } else {
3183 // Zero out the high part, effectively zero extending the input.
3184 BuildMI(BB, ClrOpcode, 1, HiReg).addImm(0);
3185 }
3186
3187 // Emit the DIV/IDIV instruction.
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003188 BuildMI(BB, DivOpcode, 1).addReg(Tmp2);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003189
3190 // Get the result of the divide or rem.
3191 BuildMI(BB, MovOpcode, 1, Result).addReg(isDiv ? LoReg : HiReg);
3192 return Result;
3193 }
3194
3195 case ISD::SHL:
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003196 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
Chris Lattnera5ade062005-01-11 21:19:59 +00003197 if (CN->getValue() == 1) { // X = SHL Y, 1 -> X = ADD Y, Y
3198 switch (N.getValueType()) {
3199 default: assert(0 && "Cannot shift this type!");
3200 case MVT::i8: Opc = X86::ADD8rr; break;
3201 case MVT::i16: Opc = X86::ADD16rr; break;
3202 case MVT::i32: Opc = X86::ADD32rr; break;
3203 }
3204 Tmp1 = SelectExpr(N.getOperand(0));
3205 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp1);
3206 return Result;
3207 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003208
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003209 switch (N.getValueType()) {
3210 default: assert(0 && "Cannot shift this type!");
3211 case MVT::i8: Opc = X86::SHL8ri; break;
3212 case MVT::i16: Opc = X86::SHL16ri; break;
3213 case MVT::i32: Opc = X86::SHL32ri; break;
3214 }
Chris Lattner11333092005-01-11 03:11:44 +00003215 Tmp1 = SelectExpr(N.getOperand(0));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003216 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
3217 return Result;
3218 }
Chris Lattner11333092005-01-11 03:11:44 +00003219
3220 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3221 Tmp1 = SelectExpr(N.getOperand(0));
3222 Tmp2 = SelectExpr(N.getOperand(1));
3223 } else {
3224 Tmp2 = SelectExpr(N.getOperand(1));
3225 Tmp1 = SelectExpr(N.getOperand(0));
3226 }
3227
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003228 switch (N.getValueType()) {
3229 default: assert(0 && "Cannot shift this type!");
3230 case MVT::i8 : Opc = X86::SHL8rCL; break;
3231 case MVT::i16: Opc = X86::SHL16rCL; break;
3232 case MVT::i32: Opc = X86::SHL32rCL; break;
3233 }
3234 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
3235 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
3236 return Result;
3237 case ISD::SRL:
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003238 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3239 switch (N.getValueType()) {
3240 default: assert(0 && "Cannot shift this type!");
3241 case MVT::i8: Opc = X86::SHR8ri; break;
3242 case MVT::i16: Opc = X86::SHR16ri; break;
3243 case MVT::i32: Opc = X86::SHR32ri; break;
3244 }
Chris Lattner11333092005-01-11 03:11:44 +00003245 Tmp1 = SelectExpr(N.getOperand(0));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003246 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
3247 return Result;
3248 }
Chris Lattner11333092005-01-11 03:11:44 +00003249
3250 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3251 Tmp1 = SelectExpr(N.getOperand(0));
3252 Tmp2 = SelectExpr(N.getOperand(1));
3253 } else {
3254 Tmp2 = SelectExpr(N.getOperand(1));
3255 Tmp1 = SelectExpr(N.getOperand(0));
3256 }
3257
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003258 switch (N.getValueType()) {
3259 default: assert(0 && "Cannot shift this type!");
3260 case MVT::i8 : Opc = X86::SHR8rCL; break;
3261 case MVT::i16: Opc = X86::SHR16rCL; break;
3262 case MVT::i32: Opc = X86::SHR32rCL; break;
3263 }
3264 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
3265 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
3266 return Result;
3267 case ISD::SRA:
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003268 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3269 switch (N.getValueType()) {
3270 default: assert(0 && "Cannot shift this type!");
3271 case MVT::i8: Opc = X86::SAR8ri; break;
3272 case MVT::i16: Opc = X86::SAR16ri; break;
3273 case MVT::i32: Opc = X86::SAR32ri; break;
3274 }
Chris Lattner11333092005-01-11 03:11:44 +00003275 Tmp1 = SelectExpr(N.getOperand(0));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003276 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
3277 return Result;
3278 }
Chris Lattner11333092005-01-11 03:11:44 +00003279
3280 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3281 Tmp1 = SelectExpr(N.getOperand(0));
3282 Tmp2 = SelectExpr(N.getOperand(1));
3283 } else {
3284 Tmp2 = SelectExpr(N.getOperand(1));
3285 Tmp1 = SelectExpr(N.getOperand(0));
3286 }
3287
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003288 switch (N.getValueType()) {
3289 default: assert(0 && "Cannot shift this type!");
3290 case MVT::i8 : Opc = X86::SAR8rCL; break;
3291 case MVT::i16: Opc = X86::SAR16rCL; break;
3292 case MVT::i32: Opc = X86::SAR32rCL; break;
3293 }
3294 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
3295 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
3296 return Result;
3297
3298 case ISD::SETCC:
Chris Lattnercb1aa8d2005-01-17 01:34:14 +00003299 EmitCMP(N.getOperand(0), N.getOperand(1), Node->hasOneUse());
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003300 EmitSetCC(BB, Result, cast<SetCCSDNode>(N)->getCondition(),
3301 MVT::isFloatingPoint(N.getOperand(1).getValueType()));
3302 return Result;
Chris Lattnere9ef81d2005-01-15 05:22:24 +00003303 case ISD::LOAD:
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003304 // Make sure we generate both values.
Chris Lattner4a108662005-01-18 03:51:59 +00003305 if (Result != 1) { // Generate the token
3306 if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
3307 assert(0 && "Load already emitted!?");
3308 } else
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003309 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3310
Chris Lattner5188ad72005-01-08 19:28:19 +00003311 switch (Node->getValueType(0)) {
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003312 default: assert(0 && "Cannot load this type!");
3313 case MVT::i1:
3314 case MVT::i8: Opc = X86::MOV8rm; break;
3315 case MVT::i16: Opc = X86::MOV16rm; break;
3316 case MVT::i32: Opc = X86::MOV32rm; break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00003317 case MVT::f32: Opc = X86::MOVSSrm; break;
3318 case MVT::f64:
3319 if (X86ScalarSSE) {
3320 Opc = X86::MOVSDrm;
3321 } else {
3322 Opc = X86::FLD64m;
3323 ContainsFPCode = true;
3324 }
3325 break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003326 }
Chris Lattner11333092005-01-11 03:11:44 +00003327
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003328 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1))){
Chris Lattner11333092005-01-11 03:11:44 +00003329 Select(N.getOperand(0));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003330 addConstantPoolReference(BuildMI(BB, Opc, 4, Result), CP->getIndex());
3331 } else {
3332 X86AddressMode AM;
Chris Lattner636e79a2005-01-13 05:53:16 +00003333
3334 SDOperand Chain = N.getOperand(0);
3335 SDOperand Address = N.getOperand(1);
3336 if (getRegPressure(Chain) > getRegPressure(Address)) {
3337 Select(Chain);
3338 SelectAddress(Address, AM);
3339 } else {
3340 SelectAddress(Address, AM);
3341 Select(Chain);
3342 }
3343
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003344 addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
3345 }
3346 return Result;
Chris Lattner67649df2005-05-14 06:52:07 +00003347 case X86ISD::FILD64m:
3348 // Make sure we generate both values.
3349 assert(Result != 1 && N.getValueType() == MVT::f64);
3350 if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
3351 assert(0 && "Load already emitted!?");
3352
3353 {
3354 X86AddressMode AM;
3355
3356 SDOperand Chain = N.getOperand(0);
3357 SDOperand Address = N.getOperand(1);
3358 if (getRegPressure(Chain) > getRegPressure(Address)) {
3359 Select(Chain);
3360 SelectAddress(Address, AM);
3361 } else {
3362 SelectAddress(Address, AM);
3363 Select(Chain);
3364 }
3365
3366 addFullAddress(BuildMI(BB, X86::FILD64m, 4, Result), AM);
3367 }
3368 return Result;
Chris Lattnere9ef81d2005-01-15 05:22:24 +00003369
3370 case ISD::EXTLOAD: // Arbitrarily codegen extloads as MOVZX*
3371 case ISD::ZEXTLOAD: {
3372 // Make sure we generate both values.
3373 if (Result != 1)
3374 ExprMap[N.getValue(1)] = 1; // Generate the token
3375 else
3376 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3377
Chris Lattnerda2ce112005-01-16 07:34:08 +00003378 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1)))
3379 if (Node->getValueType(0) == MVT::f64) {
3380 assert(cast<MVTSDNode>(Node)->getExtraValueType() == MVT::f32 &&
3381 "Bad EXTLOAD!");
3382 addConstantPoolReference(BuildMI(BB, X86::FLD32m, 4, Result),
3383 CP->getIndex());
3384 return Result;
3385 }
3386
Chris Lattnere9ef81d2005-01-15 05:22:24 +00003387 X86AddressMode AM;
3388 if (getRegPressure(Node->getOperand(0)) >
3389 getRegPressure(Node->getOperand(1))) {
3390 Select(Node->getOperand(0)); // chain
3391 SelectAddress(Node->getOperand(1), AM);
3392 } else {
3393 SelectAddress(Node->getOperand(1), AM);
3394 Select(Node->getOperand(0)); // chain
3395 }
3396
3397 switch (Node->getValueType(0)) {
3398 default: assert(0 && "Unknown type to zero extend to.");
3399 case MVT::f64:
3400 assert(cast<MVTSDNode>(Node)->getExtraValueType() == MVT::f32 &&
3401 "Bad EXTLOAD!");
3402 addFullAddress(BuildMI(BB, X86::FLD32m, 5, Result), AM);
3403 break;
3404 case MVT::i32:
3405 switch (cast<MVTSDNode>(Node)->getExtraValueType()) {
3406 default:
3407 assert(0 && "Bad zero extend!");
3408 case MVT::i1:
3409 case MVT::i8:
3410 addFullAddress(BuildMI(BB, X86::MOVZX32rm8, 5, Result), AM);
3411 break;
3412 case MVT::i16:
3413 addFullAddress(BuildMI(BB, X86::MOVZX32rm16, 5, Result), AM);
3414 break;
3415 }
3416 break;
3417 case MVT::i16:
3418 assert(cast<MVTSDNode>(Node)->getExtraValueType() <= MVT::i8 &&
3419 "Bad zero extend!");
3420 addFullAddress(BuildMI(BB, X86::MOVZX16rm8, 5, Result), AM);
3421 break;
3422 case MVT::i8:
3423 assert(cast<MVTSDNode>(Node)->getExtraValueType() == MVT::i1 &&
3424 "Bad zero extend!");
3425 addFullAddress(BuildMI(BB, X86::MOV8rm, 5, Result), AM);
3426 break;
3427 }
3428 return Result;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003429 }
Chris Lattnere9ef81d2005-01-15 05:22:24 +00003430 case ISD::SEXTLOAD: {
3431 // Make sure we generate both values.
3432 if (Result != 1)
3433 ExprMap[N.getValue(1)] = 1; // Generate the token
3434 else
3435 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3436
3437 X86AddressMode AM;
3438 if (getRegPressure(Node->getOperand(0)) >
3439 getRegPressure(Node->getOperand(1))) {
3440 Select(Node->getOperand(0)); // chain
3441 SelectAddress(Node->getOperand(1), AM);
3442 } else {
3443 SelectAddress(Node->getOperand(1), AM);
3444 Select(Node->getOperand(0)); // chain
3445 }
3446
3447 switch (Node->getValueType(0)) {
3448 case MVT::i8: assert(0 && "Cannot sign extend from bool!");
3449 default: assert(0 && "Unknown type to sign extend to.");
3450 case MVT::i32:
3451 switch (cast<MVTSDNode>(Node)->getExtraValueType()) {
3452 default:
3453 case MVT::i1: assert(0 && "Cannot sign extend from bool!");
3454 case MVT::i8:
3455 addFullAddress(BuildMI(BB, X86::MOVSX32rm8, 5, Result), AM);
3456 break;
3457 case MVT::i16:
3458 addFullAddress(BuildMI(BB, X86::MOVSX32rm16, 5, Result), AM);
3459 break;
3460 }
3461 break;
3462 case MVT::i16:
3463 assert(cast<MVTSDNode>(Node)->getExtraValueType() == MVT::i8 &&
3464 "Cannot sign extend from bool!");
3465 addFullAddress(BuildMI(BB, X86::MOVSX16rm8, 5, Result), AM);
3466 break;
3467 }
3468 return Result;
3469 }
3470
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003471 case ISD::DYNAMIC_STACKALLOC:
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003472 // Generate both result values.
3473 if (Result != 1)
3474 ExprMap[N.getValue(1)] = 1; // Generate the token
3475 else
3476 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3477
3478 // FIXME: We are currently ignoring the requested alignment for handling
3479 // greater than the stack alignment. This will need to be revisited at some
3480 // point. Align = N.getOperand(2);
3481
3482 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
3483 cast<ConstantSDNode>(N.getOperand(2))->getValue() != 0) {
3484 std::cerr << "Cannot allocate stack object with greater alignment than"
3485 << " the stack alignment yet!";
3486 abort();
3487 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003488
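    // The allocation itself is just a stack pointer adjustment, e.g. (a
    // sketch, names are illustrative):
    //   subl %size, %esp
    //   movl %esp, %result     ; result points at the newly allocated space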
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003489 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
Chris Lattner11333092005-01-11 03:11:44 +00003490 Select(N.getOperand(0));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003491 BuildMI(BB, X86::SUB32ri, 2, X86::ESP).addReg(X86::ESP)
3492 .addImm(CN->getValue());
3493 } else {
Chris Lattner11333092005-01-11 03:11:44 +00003494 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3495 Select(N.getOperand(0));
3496 Tmp1 = SelectExpr(N.getOperand(1));
3497 } else {
3498 Tmp1 = SelectExpr(N.getOperand(1));
3499 Select(N.getOperand(0));
3500 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003501
3502 // Subtract size from stack pointer, thereby allocating some space.
3503 BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(Tmp1);
3504 }
3505
3506 // Put a pointer to the space into the result register, by copying the stack
3507 // pointer.
3508 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::ESP);
3509 return Result;
3510
Chris Lattner239738a2005-05-14 08:48:15 +00003511 case X86ISD::TAILCALL:
3512 case X86ISD::CALL: {
Chris Lattner5188ad72005-01-08 19:28:19 +00003513 // The chain for this call is now lowered.
Chris Lattner239738a2005-05-14 08:48:15 +00003514 ExprMap.insert(std::make_pair(N.getValue(0), 1));
Chris Lattner5188ad72005-01-08 19:28:19 +00003515
Chris Lattnerc6f41812005-05-12 23:06:28 +00003516 bool isDirect = isa<GlobalAddressSDNode>(N.getOperand(1)) ||
3517 isa<ExternalSymbolSDNode>(N.getOperand(1));
3518 unsigned Callee = 0;
3519 if (isDirect) {
3520 Select(N.getOperand(0));
3521 } else {
3522 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3523 Select(N.getOperand(0));
3524 Callee = SelectExpr(N.getOperand(1));
3525 } else {
3526 Callee = SelectExpr(N.getOperand(1));
3527 Select(N.getOperand(0));
3528 }
3529 }
3530
3531 // If this call has values to pass in registers, do so now.
Chris Lattner239738a2005-05-14 08:48:15 +00003532 if (Node->getNumOperands() > 4) {
Chris Lattnerc6f41812005-05-12 23:06:28 +00003533 // The first value is passed in (a part of) EAX, the second in EDX.
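      // e.g. (a sketch) for a fastcc call taking two i32 register arguments:
      //   movl %arg1, %eax
      //   movl %arg2, %edx
      //   call callee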
Chris Lattner239738a2005-05-14 08:48:15 +00003534 unsigned RegOp1 = SelectExpr(N.getOperand(4));
Chris Lattnerc6f41812005-05-12 23:06:28 +00003535 unsigned RegOp2 =
Chris Lattner239738a2005-05-14 08:48:15 +00003536 Node->getNumOperands() > 5 ? SelectExpr(N.getOperand(5)) : 0;
Chris Lattnerc6f41812005-05-12 23:06:28 +00003537
Chris Lattner239738a2005-05-14 08:48:15 +00003538 switch (N.getOperand(4).getValueType()) {
Chris Lattnerc6f41812005-05-12 23:06:28 +00003539 default: assert(0 && "Bad thing to pass in regs");
3540 case MVT::i1:
3541 case MVT::i8: BuildMI(BB, X86::MOV8rr , 1,X86::AL).addReg(RegOp1); break;
3542 case MVT::i16: BuildMI(BB, X86::MOV16rr, 1,X86::AX).addReg(RegOp1); break;
3543 case MVT::i32: BuildMI(BB, X86::MOV32rr, 1,X86::EAX).addReg(RegOp1);break;
3544 }
3545 if (RegOp2)
Chris Lattner239738a2005-05-14 08:48:15 +00003546 switch (N.getOperand(5).getValueType()) {
Chris Lattnerc6f41812005-05-12 23:06:28 +00003547 default: assert(0 && "Bad thing to pass in regs");
3548 case MVT::i1:
3549 case MVT::i8:
3550 BuildMI(BB, X86::MOV8rr , 1, X86::DL).addReg(RegOp2);
3551 break;
3552 case MVT::i16:
3553 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(RegOp2);
3554 break;
3555 case MVT::i32:
3556 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RegOp2);
3557 break;
3558 }
3559 }
3560
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003561 if (GlobalAddressSDNode *GASD =
3562 dyn_cast<GlobalAddressSDNode>(N.getOperand(1))) {
3563 BuildMI(BB, X86::CALLpcrel32, 1).addGlobalAddress(GASD->getGlobal(),true);
3564 } else if (ExternalSymbolSDNode *ESSDN =
3565 dyn_cast<ExternalSymbolSDNode>(N.getOperand(1))) {
3566 BuildMI(BB, X86::CALLpcrel32,
3567 1).addExternalSymbol(ESSDN->getSymbol(), true);
3568 } else {
Chris Lattner11333092005-01-11 03:11:44 +00003569 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3570 Select(N.getOperand(0));
3571 Tmp1 = SelectExpr(N.getOperand(1));
3572 } else {
3573 Tmp1 = SelectExpr(N.getOperand(1));
3574 Select(N.getOperand(0));
3575 }
3576
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003577 BuildMI(BB, X86::CALL32r, 1).addReg(Tmp1);
3578 }
Chris Lattner239738a2005-05-14 08:48:15 +00003579
3580 // Get the number of arg bytes the caller pushed and the number the callee pops off the stack.
3581 Tmp1 = cast<ConstantSDNode>(N.getOperand(2))->getValue();
3582 Tmp2 = cast<ConstantSDNode>(N.getOperand(3))->getValue();
3583 BuildMI(BB, X86::ADJCALLSTACKUP, 2).addImm(Tmp1).addImm(Tmp2);
3584
3585 if (Node->getNumValues() != 1)
3586 switch (Node->getValueType(1)) {
3587 default: assert(0 && "Unknown value type for call result!");
3588 case MVT::Other: return 1;
3589 case MVT::i1:
3590 case MVT::i8:
3591 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
3592 break;
3593 case MVT::i16:
3594 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
3595 break;
3596 case MVT::i32:
3597 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
3598 if (Node->getNumValues() == 3 && Node->getValueType(2) == MVT::i32)
3599 BuildMI(BB, X86::MOV32rr, 1, Result+1).addReg(X86::EDX);
3600 break;
3601 case MVT::f64: // Floating-point return values live in %ST(0)
Nate Begemanf63be7d2005-07-06 18:59:04 +00003602 if (X86ScalarSSE) {
3603 ContainsFPCode = true;
3604 BuildMI(BB, X86::FpGETRESULT, 1, X86::FP0);
3605
3606 unsigned Size = MVT::getSizeInBits(MVT::f64)/8;
3607 MachineFunction *F = BB->getParent();
3608 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
3609 addFrameReference(BuildMI(BB, X86::FST64m, 5), FrameIdx).addReg(X86::FP0);
3610 addFrameReference(BuildMI(BB, X86::MOVSDrm, 4, Result), FrameIdx);
3611 break;
3612 } else {
3613 ContainsFPCode = true;
3614 BuildMI(BB, X86::FpGETRESULT, 1, Result);
3615 break;
3616 }
Chris Lattner239738a2005-05-14 08:48:15 +00003617 }
3618 return Result+N.ResNo-1;
Chris Lattnerc6f41812005-05-12 23:06:28 +00003619 }
Chris Lattner966cdfb2005-05-09 21:17:38 +00003620 case ISD::READPORT:
3621 // First, determine that the size of the operand falls within the acceptable
3622 // range for this architecture.
3623 //
3624 if (Node->getOperand(1).getValueType() != MVT::i16) {
3625 std::cerr << "llvm.readport: Address size is not 16 bits\n";
3626 exit(1);
3627 }
3628
3629 // Make sure we generate both values.
3630 if (Result != 1) { // Generate the token
3631 if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
3632 assert(0 && "readport already emitted!?");
3633 } else
3634 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3635
3636 Select(Node->getOperand(0)); // Select the chain.
3637
3638 // If the port is a single-byte constant, use the immediate form.
3639 if (ConstantSDNode *Port = dyn_cast<ConstantSDNode>(Node->getOperand(1)))
3640 if ((Port->getValue() & 255) == Port->getValue()) {
3641 switch (Node->getValueType(0)) {
3642 case MVT::i8:
3643 BuildMI(BB, X86::IN8ri, 1).addImm(Port->getValue());
3644 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
3645 return Result;
3646 case MVT::i16:
3647 BuildMI(BB, X86::IN16ri, 1).addImm(Port->getValue());
3648 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
3649 return Result;
3650 case MVT::i32:
3651 BuildMI(BB, X86::IN32ri, 1).addImm(Port->getValue());
3652 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
3653 return Result;
3654 default: break;
3655 }
3656 }
3657
3658 // Now, move the I/O port address into the DX register and use the IN
3659 // instruction to get the input data.
3660 //
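    // For a non-constant port the address goes in DX and the data comes back
    // in AL/AX/EAX, e.g. for an i8 read (a sketch):
    //   mov dx, port
    //   in  al, dx
    //   mov result, al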
3661 Tmp1 = SelectExpr(Node->getOperand(1));
3662 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Tmp1);
3663 switch (Node->getValueType(0)) {
3664 case MVT::i8:
3665 BuildMI(BB, X86::IN8rr, 0);
3666 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
3667 return Result;
3668 case MVT::i16:
3669 BuildMI(BB, X86::IN16rr, 0);
3670 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
3671 return Result;
3672 case MVT::i32:
3673 BuildMI(BB, X86::IN32rr, 0);
3674 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
3675 return Result;
3676 default:
3677 std::cerr << "Cannot do input on this data type";
3678 exit(1);
3679 }
3680
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003681 }
3682
3683 return 0;
3684}
3685
Chris Lattnere10269b2005-01-17 19:25:26 +00003686/// TryToFoldLoadOpStore - Given a store node, try to fold together a
3687/// load/op/store instruction. If successful return true.
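/// For example (a sketch), a dag of the form
///   store (add (load [Ptr]), 4), [Ptr]
/// can be emitted as a single read-modify-write instruction:
///   addl $4, [Ptr]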
3688bool ISel::TryToFoldLoadOpStore(SDNode *Node) {
3689 assert(Node->getOpcode() == ISD::STORE && "Can only do this for stores!");
3690 SDOperand Chain = Node->getOperand(0);
3691 SDOperand StVal = Node->getOperand(1);
Chris Lattner5c659812005-01-17 22:10:42 +00003692 SDOperand StPtr = Node->getOperand(2);
Chris Lattnere10269b2005-01-17 19:25:26 +00003693
3694 // The stored value must be a two-operand integer binary operation with a
3695 // single use; floating-point operations are not handled here.
Chris Lattner5c659812005-01-17 22:10:42 +00003696 if (!StVal.Val->hasOneUse() || StVal.Val->getNumOperands() != 2 ||
Chris Lattnere10269b2005-01-17 19:25:26 +00003697 MVT::isFloatingPoint(StVal.getValueType()))
3698 return false;
3699
Chris Lattner5c659812005-01-17 22:10:42 +00003700 // Token chain must either be a factor node or the load to fold.
3701 if (Chain.getOpcode() != ISD::LOAD && Chain.getOpcode() != ISD::TokenFactor)
3702 return false;
Chris Lattnere10269b2005-01-17 19:25:26 +00003703
Chris Lattner5c659812005-01-17 22:10:42 +00003704 SDOperand TheLoad;
3705
3706 // Check to see if there is a load from the same pointer that we're storing
3707 // to in either operand of the binop.
3708 if (StVal.getOperand(0).getOpcode() == ISD::LOAD &&
3709 StVal.getOperand(0).getOperand(1) == StPtr)
3710 TheLoad = StVal.getOperand(0);
3711 else if (StVal.getOperand(1).getOpcode() == ISD::LOAD &&
3712 StVal.getOperand(1).getOperand(1) == StPtr)
3713 TheLoad = StVal.getOperand(1);
3714 else
3715 return false; // No matching load operand.
3716
3717 // We can only fold the load if there are no intervening side-effecting
3718 // operations. This means that the store uses the load as its token chain, or
3719 // there are only token factor nodes in between the store and load.
3720 if (Chain != TheLoad.getValue(1)) {
3721 // Okay, the other option is that we have a store referring to (possibly
3722 // nested) token factor nodes. For now, just try peeking through one level
3723 // of token factors to see if this is the case.
3724 bool ChainOk = false;
3725 if (Chain.getOpcode() == ISD::TokenFactor) {
3726 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
3727 if (Chain.getOperand(i) == TheLoad.getValue(1)) {
3728 ChainOk = true;
3729 break;
3730 }
3731 }
3732
3733 if (!ChainOk) return false;
3734 }
3735
3736 if (TheLoad.getOperand(1) != StPtr)
Chris Lattnere10269b2005-01-17 19:25:26 +00003737 return false;
3738
3739 // Make sure that one of the operands of the binop is the load, and that the
3740 // load folds into the binop.
3741 if (((StVal.getOperand(0) != TheLoad ||
3742 !isFoldableLoad(TheLoad, StVal.getOperand(1))) &&
3743 (StVal.getOperand(1) != TheLoad ||
3744 !isFoldableLoad(TheLoad, StVal.getOperand(0)))))
3745 return false;
3746
3747 // Finally, check to see if this is one of the ops we can handle!
3748 static const unsigned ADDTAB[] = {
3749 X86::ADD8mi, X86::ADD16mi, X86::ADD32mi,
3750 X86::ADD8mr, X86::ADD16mr, X86::ADD32mr,
3751 };
3752 static const unsigned SUBTAB[] = {
3753 X86::SUB8mi, X86::SUB16mi, X86::SUB32mi,
3754 X86::SUB8mr, X86::SUB16mr, X86::SUB32mr,
3755 };
3756 static const unsigned ANDTAB[] = {
3757 X86::AND8mi, X86::AND16mi, X86::AND32mi,
3758 X86::AND8mr, X86::AND16mr, X86::AND32mr,
3759 };
3760 static const unsigned ORTAB[] = {
3761 X86::OR8mi, X86::OR16mi, X86::OR32mi,
3762 X86::OR8mr, X86::OR16mr, X86::OR32mr,
3763 };
3764 static const unsigned XORTAB[] = {
3765 X86::XOR8mi, X86::XOR16mi, X86::XOR32mi,
3766 X86::XOR8mr, X86::XOR16mr, X86::XOR32mr,
3767 };
3768 static const unsigned SHLTAB[] = {
3769 X86::SHL8mi, X86::SHL16mi, X86::SHL32mi,
3770 /*Have to put the reg in CL*/0, 0, 0,
3771 };
3772 static const unsigned SARTAB[] = {
3773 X86::SAR8mi, X86::SAR16mi, X86::SAR32mi,
3774 /*Have to put the reg in CL*/0, 0, 0,
3775 };
3776 static const unsigned SHRTAB[] = {
3777 X86::SHR8mi, X86::SHR16mi, X86::SHR32mi,
3778 /*Have to put the reg in CL*/0, 0, 0,
3779 };
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003780
Chris Lattnere10269b2005-01-17 19:25:26 +00003781 const unsigned *TabPtr = 0;
3782 switch (StVal.getOpcode()) {
3783 default:
3784 std::cerr << "CANNOT [mem] op= val: ";
3785 StVal.Val->dump(); std::cerr << "\n";
3786 case ISD::MUL:
3787 case ISD::SDIV:
3788 case ISD::UDIV:
3789 case ISD::SREM:
3790 case ISD::UREM: return false;
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003791
Chris Lattnere10269b2005-01-17 19:25:26 +00003792 case ISD::ADD: TabPtr = ADDTAB; break;
3793 case ISD::SUB: TabPtr = SUBTAB; break;
3794 case ISD::AND: TabPtr = ANDTAB; break;
3795 case ISD:: OR: TabPtr = ORTAB; break;
3796 case ISD::XOR: TabPtr = XORTAB; break;
3797 case ISD::SHL: TabPtr = SHLTAB; break;
3798 case ISD::SRA: TabPtr = SARTAB; break;
3799 case ISD::SRL: TabPtr = SHRTAB; break;
3800 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003801
Chris Lattnere10269b2005-01-17 19:25:26 +00003802 // Handle: [mem] op= CST
3803 SDOperand Op0 = StVal.getOperand(0);
3804 SDOperand Op1 = StVal.getOperand(1);
Chris Lattner0a078832005-01-23 23:20:06 +00003805 unsigned Opc = 0;
Chris Lattnere10269b2005-01-17 19:25:26 +00003806 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
3807 switch (Op0.getValueType()) { // Use Op0's type because of shifts.
3808 default: break;
3809 case MVT::i1:
3810 case MVT::i8: Opc = TabPtr[0]; break;
3811 case MVT::i16: Opc = TabPtr[1]; break;
3812 case MVT::i32: Opc = TabPtr[2]; break;
3813 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003814
Chris Lattnere10269b2005-01-17 19:25:26 +00003815 if (Opc) {
Chris Lattner4a108662005-01-18 03:51:59 +00003816 if (!ExprMap.insert(std::make_pair(TheLoad.getValue(1), 1)).second)
3817 assert(0 && "Already emitted?");
Chris Lattner5c659812005-01-17 22:10:42 +00003818 Select(Chain);
3819
Chris Lattnere10269b2005-01-17 19:25:26 +00003820 X86AddressMode AM;
3821 if (getRegPressure(TheLoad.getOperand(0)) >
3822 getRegPressure(TheLoad.getOperand(1))) {
3823 Select(TheLoad.getOperand(0));
3824 SelectAddress(TheLoad.getOperand(1), AM);
3825 } else {
3826 SelectAddress(TheLoad.getOperand(1), AM);
3827 Select(TheLoad.getOperand(0));
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003828 }
Chris Lattner5c659812005-01-17 22:10:42 +00003829
3830 if (StVal.getOpcode() == ISD::ADD) {
3831 if (CN->getValue() == 1) {
3832 switch (Op0.getValueType()) {
3833 default: break;
3834 case MVT::i8:
3835 addFullAddress(BuildMI(BB, X86::INC8m, 4), AM);
3836 return true;
3837 case MVT::i16: Opc = TabPtr[1];
3838 addFullAddress(BuildMI(BB, X86::INC16m, 4), AM);
3839 return true;
3840 case MVT::i32: Opc = TabPtr[2];
3841 addFullAddress(BuildMI(BB, X86::INC32m, 4), AM);
3842 return true;
3843 }
3844 } else if (CN->getValue()+1 == 0) { // [X] += -1 -> DEC [X]
3845 switch (Op0.getValueType()) {
3846 default: break;
3847 case MVT::i8:
3848 addFullAddress(BuildMI(BB, X86::DEC8m, 4), AM);
3849 return true;
3850 case MVT::i16: Opc = TabPtr[1];
3851 addFullAddress(BuildMI(BB, X86::DEC16m, 4), AM);
3852 return true;
3853 case MVT::i32: Opc = TabPtr[2];
3854 addFullAddress(BuildMI(BB, X86::DEC32m, 4), AM);
3855 return true;
3856 }
3857 }
3858 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003859
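        // Not a foldable INC/DEC; emit the general [mem] op= imm instruction.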
Chris Lattnere10269b2005-01-17 19:25:26 +00003860 addFullAddress(BuildMI(BB, Opc, 4+1),AM).addImm(CN->getValue());
3861 return true;
3862 }
3863 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003864
Chris Lattnere10269b2005-01-17 19:25:26 +00003865 // If we have [mem] = V op [mem], try to turn it into:
3866 // [mem] = [mem] op V.
3867 if (Op1 == TheLoad && StVal.getOpcode() != ISD::SUB &&
3868 StVal.getOpcode() != ISD::SHL && StVal.getOpcode() != ISD::SRA &&
3869 StVal.getOpcode() != ISD::SRL)
3870 std::swap(Op0, Op1);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003871
Chris Lattnere10269b2005-01-17 19:25:26 +00003872 if (Op0 != TheLoad) return false;
3873
3874 switch (Op0.getValueType()) {
3875 default: return false;
3876 case MVT::i1:
3877 case MVT::i8: Opc = TabPtr[3]; break;
3878 case MVT::i16: Opc = TabPtr[4]; break;
3879 case MVT::i32: Opc = TabPtr[5]; break;
3880 }
Chris Lattner5c659812005-01-17 22:10:42 +00003881
Chris Lattnerb422aea2005-01-18 17:35:28 +00003882 // Table entry doesn't exist?
3883 if (Opc == 0) return false;
3884
Chris Lattner4a108662005-01-18 03:51:59 +00003885 if (!ExprMap.insert(std::make_pair(TheLoad.getValue(1), 1)).second)
3886 assert(0 && "Already emitted?");
Chris Lattner5c659812005-01-17 22:10:42 +00003887 Select(Chain);
Chris Lattnere10269b2005-01-17 19:25:26 +00003888 Select(TheLoad.getOperand(0));
Chris Lattner98a8ba02005-01-18 01:06:26 +00003889
Chris Lattnere10269b2005-01-17 19:25:26 +00003890 X86AddressMode AM;
3891 SelectAddress(TheLoad.getOperand(1), AM);
3892 unsigned Reg = SelectExpr(Op1);
Chris Lattner98a8ba02005-01-18 01:06:26 +00003893 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Reg);
Chris Lattnere10269b2005-01-17 19:25:26 +00003894 return true;
3895}
3896
/// EmitPotentialTailCall - If this return node is fed by a tail call, emit the
/// tail call and return true; otherwise return false.
3899///
3900/// FIXME: This whole thing should be a post-legalize optimization pass which
3901/// recognizes and transforms the dag. We don't want the selection phase doing
3902/// this stuff!!
3903///
3904bool ISel::EmitPotentialTailCall(SDNode *RetNode) {
3905 assert(RetNode->getOpcode() == ISD::RET && "Not a return");
3906
3907 SDOperand Chain = RetNode->getOperand(0);
3908
3909 // If this is a token factor node where one operand is a call, dig into it.
3910 SDOperand TokFactor;
3911 unsigned TokFactorOperand = 0;
3912 if (Chain.getOpcode() == ISD::TokenFactor) {
3913 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
3914 if (Chain.getOperand(i).getOpcode() == ISD::CALLSEQ_END ||
3915 Chain.getOperand(i).getOpcode() == X86ISD::TAILCALL) {
3916 TokFactorOperand = i;
3917 TokFactor = Chain;
3918 Chain = Chain.getOperand(i);
3919 break;
3920 }
3921 if (TokFactor.Val == 0) return false; // No call operand.
3922 }
3923
3924 // Skip the CALLSEQ_END node if present.
3925 if (Chain.getOpcode() == ISD::CALLSEQ_END)
3926 Chain = Chain.getOperand(0);
3927
3928 // Is a tailcall the last control operation that occurs before the return?
3929 if (Chain.getOpcode() != X86ISD::TAILCALL)
3930 return false;
3931
3932 // If we return a value, is it the value produced by the call?
3933 if (RetNode->getNumOperands() > 1) {
3934 // Not returning the ret val of the call?
3935 if (Chain.Val->getNumValues() == 1 ||
3936 RetNode->getOperand(1) != Chain.getValue(1))
3937 return false;
3938
3939 if (RetNode->getNumOperands() > 2) {
3940 if (Chain.Val->getNumValues() == 2 ||
3941 RetNode->getOperand(2) != Chain.getValue(2))
3942 return false;
3943 }
3944 assert(RetNode->getNumOperands() <= 3);
3945 }
3946
3947 // CalleeCallArgAmt - The total number of bytes used for the callee arg area.
3948 // For FastCC, this will always be > 0.
3949 unsigned CalleeCallArgAmt =
3950 cast<ConstantSDNode>(Chain.getOperand(2))->getValue();
3951
3952 // CalleeCallArgPopAmt - The number of bytes in the call area popped by the
3953 // callee. For FastCC this will always be > 0, for CCC this is always 0.
3954 unsigned CalleeCallArgPopAmt =
3955 cast<ConstantSDNode>(Chain.getOperand(3))->getValue();
3956
3957 // There are several cases we can handle here. First, if the caller and
3958 // callee are both CCC functions, we can tailcall if the callee takes <= the
3959 // number of argument bytes that the caller does.
3960 if (CalleeCallArgPopAmt == 0 && // Callee is C CallingConv?
3961 X86Lowering.getBytesToPopOnReturn() == 0) { // Caller is C CallingConv?
3962 // Check to see if caller arg area size >= callee arg area size.
3963 if (X86Lowering.getBytesCallerReserves() >= CalleeCallArgAmt) {
3964 //std::cerr << "CCC TAILCALL UNIMP!\n";
3965 // If TokFactor is non-null, emit all operands.
3966
3967 //EmitCCCToCCCTailCall(Chain.Val);
3968 //return true;
3969 }
3970 return false;
3971 }
3972
3973 // Second, if both are FastCC functions, we can always perform the tail call.
3974 if (CalleeCallArgPopAmt && X86Lowering.getBytesToPopOnReturn()) {
3975 // If TokFactor is non-null, emit all operands before the call.
3976 if (TokFactor.Val) {
3977 for (unsigned i = 0, e = TokFactor.getNumOperands(); i != e; ++i)
3978 if (i != TokFactorOperand)
3979 Select(TokFactor.getOperand(i));
3980 }
3981
3982 EmitFastCCToFastCCTailCall(Chain.Val);
3983 return true;
3984 }
3985
3986 // We don't support mixed calls, due to issues with alignment. We could in
3987 // theory handle some mixed calls from CCC -> FastCC if the stack is properly
3988 // aligned (which depends on the number of arguments to the callee). TODO.
3989 return false;
3990}
3991
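/// GetAdjustedArgumentStores - Walk the chain of argument stores feeding a
/// tail call and rewrite each store to target a fixed frame index relative to
/// the incoming ESP (shifted by Offset bytes), stripping off the CALLSEQ_START
/// node along the way.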
3992static SDOperand GetAdjustedArgumentStores(SDOperand Chain, int Offset,
3993 SelectionDAG &DAG) {
3994 MVT::ValueType StoreVT;
3995 switch (Chain.getOpcode()) {
3996 case ISD::CALLSEQ_START:
Chris Lattnerea035432005-05-15 06:07:10 +00003997 // If we found the start of the call sequence, we're done. We actually
3998 // strip off the CALLSEQ_START node, to avoid generating the
3999 // ADJCALLSTACKDOWN marker for the tail call.
4000 return Chain.getOperand(0);
Chris Lattner381e8872005-05-15 05:46:45 +00004001 case ISD::TokenFactor: {
4002 std::vector<SDOperand> Ops;
4003 Ops.reserve(Chain.getNumOperands());
4004 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
4005 Ops.push_back(GetAdjustedArgumentStores(Chain.getOperand(i), Offset,DAG));
4006 return DAG.getNode(ISD::TokenFactor, MVT::Other, Ops);
4007 }
4008 case ISD::STORE: // Normal store
4009 StoreVT = Chain.getOperand(1).getValueType();
4010 break;
4011 case ISD::TRUNCSTORE: // FLOAT store
4012 StoreVT = cast<MVTSDNode>(Chain)->getExtraValueType();
    break;
  default:
    assert(0 && "Unexpected node in tail call argument chain!");
  }
4015
4016 SDOperand OrigDest = Chain.getOperand(2);
4017 unsigned OrigOffset;
4018
4019 if (OrigDest.getOpcode() == ISD::CopyFromReg) {
4020 OrigOffset = 0;
4021 assert(cast<RegSDNode>(OrigDest)->getReg() == X86::ESP);
4022 } else {
4023 // We expect only (ESP+C)
4024 assert(OrigDest.getOpcode() == ISD::ADD &&
4025 isa<ConstantSDNode>(OrigDest.getOperand(1)) &&
4026 OrigDest.getOperand(0).getOpcode() == ISD::CopyFromReg &&
4027 cast<RegSDNode>(OrigDest.getOperand(0))->getReg() == X86::ESP);
4028 OrigOffset = cast<ConstantSDNode>(OrigDest.getOperand(1))->getValue();
4029 }
4030
4031 // Compute the new offset from the incoming ESP value we wish to use.
4032 unsigned NewOffset = OrigOffset + Offset;
4033
4034 unsigned OpSize = (MVT::getSizeInBits(StoreVT)+7)/8; // Bits -> Bytes
4035 MachineFunction &MF = DAG.getMachineFunction();
4036 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, NewOffset);
4037 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
4038
4039 SDOperand InChain = GetAdjustedArgumentStores(Chain.getOperand(0), Offset,
4040 DAG);
4041 if (Chain.getOpcode() == ISD::STORE)
4042 return DAG.getNode(ISD::STORE, MVT::Other, InChain, Chain.getOperand(1),
4043 FIN);
4044 assert(Chain.getOpcode() == ISD::TRUNCSTORE);
4045 return DAG.getNode(ISD::TRUNCSTORE, MVT::Other, InChain, Chain.getOperand(1),
4046 FIN, DAG.getSrcValue(NULL), StoreVT);
4047}
4048
4049
/// EmitFastCCToFastCCTailCall - Given a tail call in the tail position from a
/// fastcc function to a fastcc function, emit the code for a 'proper' tail
/// call.
4053void ISel::EmitFastCCToFastCCTailCall(SDNode *TailCallNode) {
4054 unsigned CalleeCallArgSize =
4055 cast<ConstantSDNode>(TailCallNode->getOperand(2))->getValue();
4056 unsigned CallerArgSize = X86Lowering.getBytesToPopOnReturn();
4057
4058 //std::cerr << "****\n*** EMITTING TAIL CALL!\n****\n";
4059
  // Adjust argument stores.  Instead of storing to [ESP], store to frame
4061 // indexes that are relative to the incoming ESP. If the incoming and
4062 // outgoing arg sizes are the same we will store to [InESP] instead of
4063 // [CurESP] and the ESP referenced will be relative to the incoming function
4064 // ESP.
4065 int ESPOffset = CallerArgSize-CalleeCallArgSize;
4066 SDOperand AdjustedArgStores =
4067 GetAdjustedArgumentStores(TailCallNode->getOperand(0), ESPOffset, *TheDAG);
4068
4069 // Copy the return address of the caller into a virtual register so we don't
4070 // clobber it.
4071 SDOperand RetVal;
4072 if (ESPOffset) {
4073 SDOperand RetValAddr = X86Lowering.getReturnAddressFrameIndex(*TheDAG);
4074 RetVal = TheDAG->getLoad(MVT::i32, TheDAG->getEntryNode(),
4075 RetValAddr, TheDAG->getSrcValue(NULL));
4076 SelectExpr(RetVal);
4077 }
4078
4079 // Codegen all of the argument stores.
4080 Select(AdjustedArgStores);
4081
4082 if (RetVal.Val) {
4083 // Emit a store of the saved ret value to the new location.
4084 MachineFunction &MF = TheDAG->getMachineFunction();
4085 int ReturnAddrFI = MF.getFrameInfo()->CreateFixedObject(4, ESPOffset-4);
4086 SDOperand RetValAddr = TheDAG->getFrameIndex(ReturnAddrFI, MVT::i32);
4087 Select(TheDAG->getNode(ISD::STORE, MVT::Other, TheDAG->getEntryNode(),
4088 RetVal, RetValAddr));
4089 }
4090
4091 // Get the destination value.
4092 SDOperand Callee = TailCallNode->getOperand(1);
4093 bool isDirect = isa<GlobalAddressSDNode>(Callee) ||
4094 isa<ExternalSymbolSDNode>(Callee);
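  // For an indirect call, evaluate the callee into a register now, before ESP
  // is adjusted below.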
Chris Lattner9cb2d612005-06-17 13:23:32 +00004095 unsigned CalleeReg = 0;
Chris Lattner381e8872005-05-15 05:46:45 +00004096 if (!isDirect) CalleeReg = SelectExpr(Callee);
4097
4098 unsigned RegOp1 = 0;
4099 unsigned RegOp2 = 0;
4100
4101 if (TailCallNode->getNumOperands() > 4) {
4102 // The first value is passed in (a part of) EAX, the second in EDX.
4103 RegOp1 = SelectExpr(TailCallNode->getOperand(4));
4104 if (TailCallNode->getNumOperands() > 5)
4105 RegOp2 = SelectExpr(TailCallNode->getOperand(5));
4106
4107 switch (TailCallNode->getOperand(4).getValueType()) {
4108 default: assert(0 && "Bad thing to pass in regs");
4109 case MVT::i1:
4110 case MVT::i8:
4111 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(RegOp1);
4112 RegOp1 = X86::AL;
4113 break;
4114 case MVT::i16:
4115 BuildMI(BB, X86::MOV16rr, 1,X86::AX).addReg(RegOp1);
4116 RegOp1 = X86::AX;
4117 break;
4118 case MVT::i32:
4119 BuildMI(BB, X86::MOV32rr, 1,X86::EAX).addReg(RegOp1);
4120 RegOp1 = X86::EAX;
4121 break;
4122 }
4123 if (RegOp2)
4124 switch (TailCallNode->getOperand(5).getValueType()) {
4125 default: assert(0 && "Bad thing to pass in regs");
4126 case MVT::i1:
4127 case MVT::i8:
4128 BuildMI(BB, X86::MOV8rr, 1, X86::DL).addReg(RegOp2);
4129 RegOp2 = X86::DL;
4130 break;
4131 case MVT::i16:
4132 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(RegOp2);
4133 RegOp2 = X86::DX;
4134 break;
4135 case MVT::i32:
4136 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RegOp2);
4137 RegOp2 = X86::EDX;
4138 break;
4139 }
4140 }
4141
4142 // Adjust ESP.
4143 if (ESPOffset)
4144 BuildMI(BB, X86::ADJSTACKPTRri, 2,
4145 X86::ESP).addReg(X86::ESP).addImm(ESPOffset);
4146
4147 // TODO: handle jmp [mem]
4148 if (!isDirect) {
4149 BuildMI(BB, X86::TAILJMPr, 1).addReg(CalleeReg);
4150 } else if (GlobalAddressSDNode *GASD = dyn_cast<GlobalAddressSDNode>(Callee)){
Chris Lattner16cb6f82005-05-19 05:54:33 +00004151 BuildMI(BB, X86::TAILJMPd, 1).addGlobalAddress(GASD->getGlobal(), true);
Chris Lattner381e8872005-05-15 05:46:45 +00004152 } else {
4153 ExternalSymbolSDNode *ESSDN = cast<ExternalSymbolSDNode>(Callee);
4154 BuildMI(BB, X86::TAILJMPd, 1).addExternalSymbol(ESSDN->getSymbol(), true);
4155 }
  // TODO: Add implicit uses of RegOp1/RegOp2 to the tail jump instruction.
4157}
4158
Chris Lattnere10269b2005-01-17 19:25:26 +00004159
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004160void ISel::Select(SDOperand N) {
4161 unsigned Tmp1, Tmp2, Opc;
4162
Nate Begeman85fdeb22005-03-24 04:39:54 +00004163 if (!ExprMap.insert(std::make_pair(N, 1)).second)
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004164 return; // Already selected.
4165
Chris Lattner989de032005-01-11 06:14:36 +00004166 SDNode *Node = N.Val;
4167
4168 switch (Node->getOpcode()) {
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004169 default:
Chris Lattner989de032005-01-11 06:14:36 +00004170 Node->dump(); std::cerr << "\n";
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004171 assert(0 && "Node not handled yet!");
4172 case ISD::EntryToken: return; // Noop
Chris Lattnerc3580712005-01-13 18:01:36 +00004173 case ISD::TokenFactor:
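    // Emit the operands in order of decreasing register pressure.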
Chris Lattner1d50b7f2005-01-13 19:56:00 +00004174 if (Node->getNumOperands() == 2) {
Misha Brukman0e0a7a452005-04-21 23:38:14 +00004175 bool OneFirst =
Chris Lattner1d50b7f2005-01-13 19:56:00 +00004176 getRegPressure(Node->getOperand(1))>getRegPressure(Node->getOperand(0));
4177 Select(Node->getOperand(OneFirst));
4178 Select(Node->getOperand(!OneFirst));
4179 } else {
4180 std::vector<std::pair<unsigned, unsigned> > OpsP;
4181 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
4182 OpsP.push_back(std::make_pair(getRegPressure(Node->getOperand(i)), i));
4183 std::sort(OpsP.begin(), OpsP.end());
4184 std::reverse(OpsP.begin(), OpsP.end());
4185 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
4186 Select(Node->getOperand(OpsP[i].second));
4187 }
Chris Lattnerc3580712005-01-13 18:01:36 +00004188 return;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004189 case ISD::CopyToReg:
Chris Lattneref6806c2005-01-12 02:02:48 +00004190 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
4191 Select(N.getOperand(0));
4192 Tmp1 = SelectExpr(N.getOperand(1));
4193 } else {
4194 Tmp1 = SelectExpr(N.getOperand(1));
4195 Select(N.getOperand(0));
4196 }
Chris Lattner18c2f132005-01-13 20:50:02 +00004197 Tmp2 = cast<RegSDNode>(N)->getReg();
Misha Brukman0e0a7a452005-04-21 23:38:14 +00004198
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004199 if (Tmp1 != Tmp2) {
4200 switch (N.getOperand(1).getValueType()) {
4201 default: assert(0 && "Invalid type for operation!");
4202 case MVT::i1:
4203 case MVT::i8: Opc = X86::MOV8rr; break;
4204 case MVT::i16: Opc = X86::MOV16rr; break;
4205 case MVT::i32: Opc = X86::MOV32rr; break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00004206 case MVT::f32: Opc = X86::MOVAPSrr; break;
4207 case MVT::f64:
4208 if (X86ScalarSSE) {
4209 Opc = X86::MOVAPDrr;
4210 } else {
4211 Opc = X86::FpMOV;
4212 ContainsFPCode = true;
4213 }
4214 break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004215 }
4216 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
4217 }
4218 return;
4219 case ISD::RET:
Chris Lattner381e8872005-05-15 05:46:45 +00004220 if (N.getOperand(0).getOpcode() == ISD::CALLSEQ_END ||
4221 N.getOperand(0).getOpcode() == X86ISD::TAILCALL ||
4222 N.getOperand(0).getOpcode() == ISD::TokenFactor)
4223 if (EmitPotentialTailCall(Node))
4224 return;
4225
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004226 switch (N.getNumOperands()) {
4227 default:
4228 assert(0 && "Unknown return instruction!");
4229 case 3:
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004230 assert(N.getOperand(1).getValueType() == MVT::i32 &&
4231 N.getOperand(2).getValueType() == MVT::i32 &&
4232 "Unknown two-register value!");
Chris Lattner11333092005-01-11 03:11:44 +00004233 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
4234 Tmp1 = SelectExpr(N.getOperand(1));
4235 Tmp2 = SelectExpr(N.getOperand(2));
4236 } else {
4237 Tmp2 = SelectExpr(N.getOperand(2));
4238 Tmp1 = SelectExpr(N.getOperand(1));
4239 }
4240 Select(N.getOperand(0));
4241
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004242 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4243 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(Tmp2);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004244 break;
4245 case 2:
Chris Lattner11333092005-01-11 03:11:44 +00004246 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
4247 Select(N.getOperand(0));
4248 Tmp1 = SelectExpr(N.getOperand(1));
4249 } else {
4250 Tmp1 = SelectExpr(N.getOperand(1));
4251 Select(N.getOperand(0));
4252 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004253 switch (N.getOperand(1).getValueType()) {
4254 default: assert(0 && "All other types should have been promoted!!");
Nate Begemanf63be7d2005-07-06 18:59:04 +00004255 case MVT::f32:
4256 if (X86ScalarSSE) {
4257 // Spill the value to memory and reload it into top of stack.
4258 unsigned Size = MVT::getSizeInBits(MVT::f32)/8;
4259 MachineFunction *F = BB->getParent();
4260 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
4261 addFrameReference(BuildMI(BB, X86::MOVSSmr, 5), FrameIdx).addReg(Tmp1);
4262 addFrameReference(BuildMI(BB, X86::FLD32m, 4, X86::FP0), FrameIdx);
4263 BuildMI(BB, X86::FpSETRESULT, 1).addReg(X86::FP0);
4264 ContainsFPCode = true;
4265 } else {
4266 assert(0 && "MVT::f32 only legal with scalar sse fp");
4267 abort();
4268 }
4269 break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004270 case MVT::f64:
Nate Begemanf63be7d2005-07-06 18:59:04 +00004271 if (X86ScalarSSE) {
4272 // Spill the value to memory and reload it into top of stack.
4273 unsigned Size = MVT::getSizeInBits(MVT::f64)/8;
4274 MachineFunction *F = BB->getParent();
4275 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
4276 addFrameReference(BuildMI(BB, X86::MOVSDmr, 5), FrameIdx).addReg(Tmp1);
4277 addFrameReference(BuildMI(BB, X86::FLD64m, 4, X86::FP0), FrameIdx);
4278 BuildMI(BB, X86::FpSETRESULT, 1).addReg(X86::FP0);
4279 ContainsFPCode = true;
4280 } else {
4281 BuildMI(BB, X86::FpSETRESULT, 1).addReg(Tmp1);
4282 }
4283 break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004284 case MVT::i32:
Nate Begemanf63be7d2005-07-06 18:59:04 +00004285 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4286 break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004287 }
4288 break;
4289 case 1:
Chris Lattner11333092005-01-11 03:11:44 +00004290 Select(N.getOperand(0));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004291 break;
4292 }
Chris Lattner3648c672005-05-13 21:44:04 +00004293 if (X86Lowering.getBytesToPopOnReturn() == 0)
4294 BuildMI(BB, X86::RET, 0); // Just emit a 'ret' instruction
4295 else
4296 BuildMI(BB, X86::RETI, 1).addImm(X86Lowering.getBytesToPopOnReturn());
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004297 return;
4298 case ISD::BR: {
4299 Select(N.getOperand(0));
4300 MachineBasicBlock *Dest =
4301 cast<BasicBlockSDNode>(N.getOperand(1))->getBasicBlock();
4302 BuildMI(BB, X86::JMP, 1).addMBB(Dest);
4303 return;
4304 }
4305
4306 case ISD::BRCOND: {
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004307 MachineBasicBlock *Dest =
4308 cast<BasicBlockSDNode>(N.getOperand(2))->getBasicBlock();
Chris Lattner11333092005-01-11 03:11:44 +00004309
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004310 // Try to fold a setcc into the branch. If this fails, emit a test/jne
4311 // pair.
Chris Lattner6c07aee2005-01-11 04:06:27 +00004312 if (EmitBranchCC(Dest, N.getOperand(0), N.getOperand(1))) {
4313 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
4314 Select(N.getOperand(0));
4315 Tmp1 = SelectExpr(N.getOperand(1));
4316 } else {
4317 Tmp1 = SelectExpr(N.getOperand(1));
4318 Select(N.getOperand(0));
4319 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004320 BuildMI(BB, X86::TEST8rr, 2).addReg(Tmp1).addReg(Tmp1);
4321 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
4322 }
Chris Lattner11333092005-01-11 03:11:44 +00004323
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004324 return;
4325 }
Chris Lattnere9ef81d2005-01-15 05:22:24 +00004326
Chris Lattner4df0de92005-01-17 00:00:33 +00004327 case ISD::LOAD:
4328 // If this load could be folded into the only using instruction, and if it
4329 // is safe to emit the instruction here, try to do so now.
4330 if (Node->hasNUsesOfValue(1, 0)) {
4331 SDOperand TheVal = N.getValue(0);
4332 SDNode *User = 0;
4333 for (SDNode::use_iterator UI = Node->use_begin(); ; ++UI) {
4334 assert(UI != Node->use_end() && "Didn't find use!");
4335 SDNode *UN = *UI;
4336 for (unsigned i = 0, e = UN->getNumOperands(); i != e; ++i)
4337 if (UN->getOperand(i) == TheVal) {
4338 User = UN;
4339 goto FoundIt;
4340 }
4341 }
4342 FoundIt:
4343 // Only handle unary operators right now.
4344 if (User->getNumOperands() == 1) {
Chris Lattner4a108662005-01-18 03:51:59 +00004345 ExprMap.erase(N);
Chris Lattner4df0de92005-01-17 00:00:33 +00004346 SelectExpr(SDOperand(User, 0));
4347 return;
4348 }
4349 }
Chris Lattnerb71f8fc2005-01-18 04:00:54 +00004350 ExprMap.erase(N);
Chris Lattner4df0de92005-01-17 00:00:33 +00004351 SelectExpr(N);
4352 return;
Chris Lattner966cdfb2005-05-09 21:17:38 +00004353 case ISD::READPORT:
Chris Lattnere9ef81d2005-01-15 05:22:24 +00004354 case ISD::EXTLOAD:
4355 case ISD::SEXTLOAD:
4356 case ISD::ZEXTLOAD:
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004357 case ISD::DYNAMIC_STACKALLOC:
Chris Lattner239738a2005-05-14 08:48:15 +00004358 case X86ISD::TAILCALL:
4359 case X86ISD::CALL:
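      // These nodes define a value in addition to the chain, so select them as
      // expressions.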
Chris Lattnerb71f8fc2005-01-18 04:00:54 +00004360 ExprMap.erase(N);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004361 SelectExpr(N);
4362 return;
Chris Lattnerc6f41812005-05-12 23:06:28 +00004363 case ISD::CopyFromReg:
Chris Lattner67649df2005-05-14 06:52:07 +00004364 case X86ISD::FILD64m:
Chris Lattnerc6f41812005-05-12 23:06:28 +00004365 ExprMap.erase(N);
4366 SelectExpr(N.getValue(0));
4367 return;
Chris Lattnere9ef81d2005-01-15 05:22:24 +00004368
4369 case ISD::TRUNCSTORE: { // truncstore chain, val, ptr :storety
4370 // On X86, we can represent all types except for Bool and Float natively.
4371 X86AddressMode AM;
4372 MVT::ValueType StoredTy = cast<MVTSDNode>(Node)->getExtraValueType();
Chris Lattnerda2ce112005-01-16 07:34:08 +00004373 assert((StoredTy == MVT::i1 || StoredTy == MVT::f32 ||
4374 StoredTy == MVT::i16 /*FIXME: THIS IS JUST FOR TESTING!*/)
4375 && "Unsupported TRUNCSTORE for this target!");
4376
4377 if (StoredTy == MVT::i16) {
4378 // FIXME: This is here just to allow testing. X86 doesn't really have a
4379 // TRUNCSTORE i16 operation, but this is required for targets that do not
4380 // have 16-bit integer registers. We occasionally disable 16-bit integer
4381 // registers to test the promotion code.
4382 Select(N.getOperand(0));
4383 Tmp1 = SelectExpr(N.getOperand(1));
4384 SelectAddress(N.getOperand(2), AM);
4385
4386 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4387 addFullAddress(BuildMI(BB, X86::MOV16mr, 5), AM).addReg(X86::AX);
4388 return;
4389 }
Chris Lattnere9ef81d2005-01-15 05:22:24 +00004390
4391 // Store of constant bool?
4392 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
4393 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
4394 Select(N.getOperand(0));
4395 SelectAddress(N.getOperand(2), AM);
4396 } else {
4397 SelectAddress(N.getOperand(2), AM);
4398 Select(N.getOperand(0));
4399 }
4400 addFullAddress(BuildMI(BB, X86::MOV8mi, 5), AM).addImm(CN->getValue());
4401 return;
4402 }
4403
4404 switch (StoredTy) {
4405 default: assert(0 && "Cannot truncstore this type!");
4406 case MVT::i1: Opc = X86::MOV8mr; break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00004407 case MVT::f32:
4408 assert(!X86ScalarSSE && "Cannot truncstore scalar SSE regs");
4409 Opc = X86::FST32m; break;
Chris Lattnere9ef81d2005-01-15 05:22:24 +00004410 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00004411
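    // Select the chain, the stored value, and the address in order of
    // decreasing register pressure.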
Chris Lattnere9ef81d2005-01-15 05:22:24 +00004412 std::vector<std::pair<unsigned, unsigned> > RP;
4413 RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
4414 RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
4415 RP.push_back(std::make_pair(getRegPressure(N.getOperand(2)), 2));
4416 std::sort(RP.begin(), RP.end());
4417
Chris Lattner572dd082005-02-23 05:57:21 +00004418 Tmp1 = 0; // Silence a warning.
Chris Lattnere9ef81d2005-01-15 05:22:24 +00004419 for (unsigned i = 0; i != 3; ++i)
4420 switch (RP[2-i].second) {
4421 default: assert(0 && "Unknown operand number!");
4422 case 0: Select(N.getOperand(0)); break;
4423 case 1: Tmp1 = SelectExpr(N.getOperand(1)); break;
4424 case 2: SelectAddress(N.getOperand(2), AM); break;
4425 }
4426
4427 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1);
4428 return;
4429 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004430 case ISD::STORE: {
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004431 X86AddressMode AM;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004432
4433 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
4434 Opc = 0;
4435 switch (CN->getValueType(0)) {
4436 default: assert(0 && "Invalid type for operation!");
4437 case MVT::i1:
4438 case MVT::i8: Opc = X86::MOV8mi; break;
4439 case MVT::i16: Opc = X86::MOV16mi; break;
4440 case MVT::i32: Opc = X86::MOV32mi; break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004441 }
4442 if (Opc) {
Chris Lattner11333092005-01-11 03:11:44 +00004443 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
4444 Select(N.getOperand(0));
4445 SelectAddress(N.getOperand(2), AM);
4446 } else {
4447 SelectAddress(N.getOperand(2), AM);
4448 Select(N.getOperand(0));
4449 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004450 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addImm(CN->getValue());
4451 return;
4452 }
Chris Lattner75f354b2005-04-21 19:03:24 +00004453 } else if (GlobalAddressSDNode *GA =
4454 dyn_cast<GlobalAddressSDNode>(N.getOperand(1))) {
4455 assert(GA->getValueType(0) == MVT::i32 && "Bad pointer operand");
4456
4457 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
4458 Select(N.getOperand(0));
4459 SelectAddress(N.getOperand(2), AM);
4460 } else {
4461 SelectAddress(N.getOperand(2), AM);
4462 Select(N.getOperand(0));
4463 }
4464 addFullAddress(BuildMI(BB, X86::MOV32mi, 4+1),
4465 AM).addGlobalAddress(GA->getGlobal());
4466 return;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004467 }
Chris Lattner837caa72005-01-11 23:21:30 +00004468
4469 // Check to see if this is a load/op/store combination.
Chris Lattnere10269b2005-01-17 19:25:26 +00004470 if (TryToFoldLoadOpStore(Node))
4471 return;
Chris Lattner837caa72005-01-11 23:21:30 +00004472
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004473 switch (N.getOperand(1).getValueType()) {
4474 default: assert(0 && "Cannot store this type!");
4475 case MVT::i1:
4476 case MVT::i8: Opc = X86::MOV8mr; break;
4477 case MVT::i16: Opc = X86::MOV16mr; break;
4478 case MVT::i32: Opc = X86::MOV32mr; break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00004479 case MVT::f32: Opc = X86::MOVSSmr; break;
4480 case MVT::f64: Opc = X86ScalarSSE ? X86::MOVSDmr : X86::FST64m; break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004481 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00004482
Chris Lattner11333092005-01-11 03:11:44 +00004483 std::vector<std::pair<unsigned, unsigned> > RP;
4484 RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
4485 RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
4486 RP.push_back(std::make_pair(getRegPressure(N.getOperand(2)), 2));
4487 std::sort(RP.begin(), RP.end());
4488
Chris Lattner572dd082005-02-23 05:57:21 +00004489 Tmp1 = 0; // Silence a warning.
Chris Lattner11333092005-01-11 03:11:44 +00004490 for (unsigned i = 0; i != 3; ++i)
4491 switch (RP[2-i].second) {
4492 default: assert(0 && "Unknown operand number!");
4493 case 0: Select(N.getOperand(0)); break;
4494 case 1: Tmp1 = SelectExpr(N.getOperand(1)); break;
Chris Lattnera3aa2e22005-01-11 03:37:59 +00004495 case 2: SelectAddress(N.getOperand(2), AM); break;
Chris Lattner11333092005-01-11 03:11:44 +00004496 }
4497
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004498 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1);
4499 return;
4500 }
Chris Lattner16cd04d2005-05-12 23:24:06 +00004501 case ISD::CALLSEQ_START:
Chris Lattner3648c672005-05-13 21:44:04 +00004502 Select(N.getOperand(0));
4503 // Stack amount
4504 Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
4505 BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(Tmp1);
4506 return;
Chris Lattner16cd04d2005-05-12 23:24:06 +00004507 case ISD::CALLSEQ_END:
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004508 Select(N.getOperand(0));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004509 return;
Chris Lattner989de032005-01-11 06:14:36 +00004510 case ISD::MEMSET: {
4511 Select(N.getOperand(0)); // Select the chain.
4512 unsigned Align =
4513 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
4514 if (Align == 0) Align = 1;
4515
    // Turn the byte count into the number of iterations.
4517 unsigned CountReg;
4518 unsigned Opcode;
4519 if (ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Node->getOperand(2))) {
4520 unsigned Val = ValC->getValue() & 255;
4521
4522 // If the value is a constant, then we can potentially use larger sets.
4523 switch (Align & 3) {
4524 case 2: // WORD aligned
4525 CountReg = MakeReg(MVT::i32);
4526 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4527 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
4528 } else {
4529 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4530 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
4531 }
4532 BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
4533 Opcode = X86::REP_STOSW;
4534 break;
4535 case 0: // DWORD aligned
4536 CountReg = MakeReg(MVT::i32);
4537 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4538 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
4539 } else {
4540 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4541 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
4542 }
4543 Val = (Val << 8) | Val;
4544 BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
4545 Opcode = X86::REP_STOSD;
4546 break;
4547 default: // BYTE aligned
4548 CountReg = SelectExpr(Node->getOperand(3));
4549 BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
4550 Opcode = X86::REP_STOSB;
4551 break;
4552 }
4553 } else {
4554 // If it's not a constant value we are storing, just fall back. We could
4555 // try to be clever to form 16 bit and 32 bit values, but we don't yet.
4556 unsigned ValReg = SelectExpr(Node->getOperand(2));
4557 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
4558 CountReg = SelectExpr(Node->getOperand(3));
4559 Opcode = X86::REP_STOSB;
4560 }
4561
    // Regardless of alignment, the destination goes in EDI and the count in
    // ECX; the value to store was placed in AL/AX/EAX above.
4564 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
4565 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
4566 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
4567 BuildMI(BB, Opcode, 0);
4568 return;
4569 }
Chris Lattner966cdfb2005-05-09 21:17:38 +00004570 case ISD::MEMCPY: {
Chris Lattner31805bf2005-01-11 06:19:26 +00004571 Select(N.getOperand(0)); // Select the chain.
4572 unsigned Align =
4573 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
4574 if (Align == 0) Align = 1;
4575
    // Turn the byte count into the number of iterations.
4577 unsigned CountReg;
4578 unsigned Opcode;
4579 switch (Align & 3) {
4580 case 2: // WORD aligned
4581 CountReg = MakeReg(MVT::i32);
4582 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4583 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
4584 } else {
4585 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4586 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
4587 }
4588 Opcode = X86::REP_MOVSW;
4589 break;
4590 case 0: // DWORD aligned
4591 CountReg = MakeReg(MVT::i32);
4592 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4593 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
4594 } else {
4595 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4596 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
4597 }
4598 Opcode = X86::REP_MOVSD;
4599 break;
4600 default: // BYTE aligned
4601 CountReg = SelectExpr(Node->getOperand(3));
4602 Opcode = X86::REP_MOVSB;
4603 break;
4604 }
4605
4606 // No matter what the alignment is, we put the source in ESI, the
4607 // destination in EDI, and the count in ECX.
4608 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
4609 unsigned TmpReg2 = SelectExpr(Node->getOperand(2));
4610 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
4611 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
4612 BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
4613 BuildMI(BB, Opcode, 0);
4614 return;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004615 }
Chris Lattner966cdfb2005-05-09 21:17:38 +00004616 case ISD::WRITEPORT:
4617 if (Node->getOperand(2).getValueType() != MVT::i16) {
4618 std::cerr << "llvm.writeport: Address size is not 16 bits\n";
4619 exit(1);
4620 }
4621 Select(Node->getOperand(0)); // Emit the chain.
4622
4623 Tmp1 = SelectExpr(Node->getOperand(1));
4624 switch (Node->getOperand(1).getValueType()) {
4625 case MVT::i8:
4626 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
4627 Tmp2 = X86::OUT8ir; Opc = X86::OUT8rr;
4628 break;
4629 case MVT::i16:
4630 BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(Tmp1);
4631 Tmp2 = X86::OUT16ir; Opc = X86::OUT16rr;
4632 break;
4633 case MVT::i32:
4634 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4635 Tmp2 = X86::OUT32ir; Opc = X86::OUT32rr;
4636 break;
4637 default:
      std::cerr << "llvm.writeport: invalid data type for X86 target\n";
4639 exit(1);
4640 }
4641
4642 // If the port is a single-byte constant, use the immediate form.
4643 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Node->getOperand(2)))
4644 if ((CN->getValue() & 255) == CN->getValue()) {
4645 BuildMI(BB, Tmp2, 1).addImm(CN->getValue());
4646 return;
4647 }
4648
4649 // Otherwise, move the I/O port address into the DX register.
4650 unsigned Reg = SelectExpr(Node->getOperand(2));
4651 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Reg);
4652 BuildMI(BB, Opc, 0);
4653 return;
4654 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004655 assert(0 && "Should not be reached!");
4656}
4657
4658
4659/// createX86PatternInstructionSelector - This pass converts an LLVM function
4660/// into a machine code representation using pattern matching and a machine
4661/// description file.
4662///
4663FunctionPass *llvm::createX86PatternInstructionSelector(TargetMachine &TM) {
Misha Brukman0e0a7a452005-04-21 23:38:14 +00004664 return new ISel(TM);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004665}