//===-- X86ISelPattern.cpp - A pattern matching inst selector for X86 -----===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a pattern matching instruction selector for X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/ADT/Statistic.h"
#include <set>
#include <algorithm>
using namespace llvm;

// FIXME: temporary.
#include "llvm/Support/CommandLine.h"
static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden,
                                  cl::desc("Enable fastcc on X86"));

namespace {
  // X86 Specific DAG Nodes
  namespace X86ISD {
    enum NodeType {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// FILD64m - This instruction implements SINT_TO_FP with a
      /// 64-bit source in memory and a FP reg result. This corresponds to
      /// the X86::FILD64m instruction. It has two inputs (token chain and
      /// address) and two outputs (FP value and token chain).
      FILD64m,

      /// FISTP64m - This instruction implements FP_TO_SINT with a
      /// 64-bit destination in memory and a FP reg source. This corresponds to
      /// the X86::FISTP64m instruction. It has three inputs (token chain,
      /// FP value, and address) and one output (a token chain).
      FISTP64m,

      /// CALL/TAILCALL - These operations represent an abstract X86 call
      /// instruction, which includes a bunch of information. In particular the
      /// operands of these nodes are:
      ///
      /// #0 - The incoming token chain
      /// #1 - The callee
      /// #2 - The number of arg bytes the caller pushes on the stack.
      /// #3 - The number of arg bytes the callee pops off the stack.
      /// #4 - The value to pass in AL/AX/EAX (optional)
      /// #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      /// #0 - The outgoing token chain
      /// #1 - The first register result value (optional)
      /// #2 - The second register result value (optional)
      ///
      /// The CALL vs TAILCALL distinction boils down to whether the callee is
      /// known not to modify the caller's stack frame, as is standard with
      /// LLVM.
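      ///
      /// For example, a C-convention call to 'int f(int, int)' is represented
      /// as a CALL node whose operand #2 is 8 (two 4-byte arguments pushed by
      /// the caller), whose operand #3 is 0 (the callee pops nothing), and
      /// whose results are the outgoing token chain plus one i32 register
      /// value.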
      CALL,
      TAILCALL,
    };
  }
}

//===----------------------------------------------------------------------===//
// X86TargetLowering - X86 Implementation of the TargetLowering interface
namespace {
  class X86TargetLowering : public TargetLowering {
    int VarArgsFrameIndex;    // FrameIndex for start of varargs area.
    int ReturnAddrIndex;      // FrameIndex for return slot.
    int BytesToPopOnReturn;   // Number of arg bytes ret should pop.
    int BytesCallerReserves;  // Number of arg bytes caller makes.
  public:
    X86TargetLowering(TargetMachine &TM) : TargetLowering(TM) {
      // Set up the TargetLowering object.

      // X86 is weird, it always uses i8 for shift amounts and setcc results.
      setShiftAmountType(MVT::i8);
      setSetCCResultType(MVT::i8);
      setSetCCResultContents(ZeroOrOneSetCCResult);
      setShiftAmountFlavor(Mask); // shl X, 32 == shl X, 0

      // Set up the register classes.
      // FIXME: Eliminate these two classes when legalize can handle promotions
      // well.
      addRegisterClass(MVT::i1, X86::R8RegisterClass);
      addRegisterClass(MVT::i8, X86::R8RegisterClass);
      addRegisterClass(MVT::i16, X86::R16RegisterClass);
      addRegisterClass(MVT::i32, X86::R32RegisterClass);

      // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
      // operation.
      setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
      setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
      setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
      setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);

      // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
      // this operation.
      setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
      setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);

      if (!X86ScalarSSE) {
        // We can handle SINT_TO_FP and FP_TO_SINT from/to i64 even though i64
        // isn't legal.
        setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
        setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
      }

      setOperationAction(ISD::BRCONDTWOWAY , MVT::Other, Expand);
      setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
      setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
      setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand);
      setOperationAction(ISD::SREM , MVT::f64 , Expand);
      setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
      setOperationAction(ISD::CTTZ , MVT::i8 , Expand);
      setOperationAction(ISD::CTLZ , MVT::i8 , Expand);
      setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
      setOperationAction(ISD::CTTZ , MVT::i16 , Expand);
      setOperationAction(ISD::CTLZ , MVT::i16 , Expand);
      setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
      setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
      setOperationAction(ISD::CTLZ , MVT::i32 , Expand);

      setOperationAction(ISD::READIO , MVT::i1 , Expand);
      setOperationAction(ISD::READIO , MVT::i8 , Expand);
      setOperationAction(ISD::READIO , MVT::i16 , Expand);
      setOperationAction(ISD::READIO , MVT::i32 , Expand);
      setOperationAction(ISD::WRITEIO , MVT::i1 , Expand);
      setOperationAction(ISD::WRITEIO , MVT::i8 , Expand);
      setOperationAction(ISD::WRITEIO , MVT::i16 , Expand);
      setOperationAction(ISD::WRITEIO , MVT::i32 , Expand);

      // These should be promoted to a larger select which is supported.
      setOperationAction(ISD::SELECT , MVT::i1 , Promote);
      setOperationAction(ISD::SELECT , MVT::i8 , Promote);

      if (X86ScalarSSE) {
        // Set up the FP register classes.
        addRegisterClass(MVT::f32, X86::RXMMRegisterClass);
        addRegisterClass(MVT::f64, X86::RXMMRegisterClass);

        // SSE has no load+extend ops
        setOperationAction(ISD::EXTLOAD, MVT::f32, Expand);
        setOperationAction(ISD::ZEXTLOAD, MVT::f32, Expand);

        // SSE has no i16 to fp conversion, only i32
        setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);

        // We don't support sin/cos/fabs/fneg/fmod
        setOperationAction(ISD::FSIN , MVT::f64, Expand);
        setOperationAction(ISD::FCOS , MVT::f64, Expand);
        setOperationAction(ISD::FABS , MVT::f64, Expand);
        setOperationAction(ISD::FNEG , MVT::f64, Expand);
        setOperationAction(ISD::SREM , MVT::f64, Expand);
        setOperationAction(ISD::FSIN , MVT::f32, Expand);
        setOperationAction(ISD::FCOS , MVT::f32, Expand);
        setOperationAction(ISD::FABS , MVT::f32, Expand);
        setOperationAction(ISD::FNEG , MVT::f32, Expand);
        setOperationAction(ISD::SREM , MVT::f32, Expand);
      } else {
        // Set up the FP register classes.
        addRegisterClass(MVT::f64, X86::RFPRegisterClass);

        if (!UnsafeFPMath) {
          setOperationAction(ISD::FSIN , MVT::f64 , Expand);
          setOperationAction(ISD::FCOS , MVT::f64 , Expand);
        }

        addLegalFPImmediate(+0.0); // FLD0
        addLegalFPImmediate(+1.0); // FLD1
        addLegalFPImmediate(-0.0); // FLD0/FCHS
        addLegalFPImmediate(-1.0); // FLD1/FCHS
      }
      computeRegisterProperties();

      maxStoresPerMemSet = 8;  // For %llvm.memset -> sequence of stores
      maxStoresPerMemCpy = 8;  // For %llvm.memcpy -> sequence of stores
      maxStoresPerMemMove = 8; // For %llvm.memmove -> sequence of stores
      allowUnalignedStores = true; // x86 supports it!
    }

    // Return the number of bytes that a function should pop when it returns (in
    // addition to the space used by the return address).
    //
    unsigned getBytesToPopOnReturn() const { return BytesToPopOnReturn; }

    // Return the number of bytes that the caller reserves for arguments passed
    // to this function.
    unsigned getBytesCallerReserves() const { return BytesCallerReserves; }

    /// LowerOperation - Provide custom lowering hooks for some operations.
    ///
    virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);

    /// LowerArguments - This hook must be implemented to indicate how we should
    /// lower the arguments for the specified function, into the specified DAG.
    virtual std::vector<SDOperand>
    LowerArguments(Function &F, SelectionDAG &DAG);

    /// LowerCallTo - This hook lowers an abstract call to a function into an
    /// actual call.
    virtual std::pair<SDOperand, SDOperand>
    LowerCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg, unsigned CC,
                bool isTailCall, SDOperand Callee, ArgListTy &Args,
                SelectionDAG &DAG);

    virtual SDOperand LowerVAStart(SDOperand Chain, SDOperand VAListP,
                                   Value *VAListV, SelectionDAG &DAG);
    virtual std::pair<SDOperand,SDOperand>
    LowerVAArg(SDOperand Chain, SDOperand VAListP, Value *VAListV,
               const Type *ArgTy, SelectionDAG &DAG);

    virtual std::pair<SDOperand, SDOperand>
    LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, unsigned Depth,
                            SelectionDAG &DAG);

    SDOperand getReturnAddressFrameIndex(SelectionDAG &DAG);

  private:
    // C Calling Convention implementation.
    std::vector<SDOperand> LowerCCCArguments(Function &F, SelectionDAG &DAG);
    std::pair<SDOperand, SDOperand>
    LowerCCCCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg,
                   bool isTailCall,
                   SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);

    // Fast Calling Convention implementation.
    std::vector<SDOperand> LowerFastCCArguments(Function &F, SelectionDAG &DAG);
    std::pair<SDOperand, SDOperand>
    LowerFastCCCallTo(SDOperand Chain, const Type *RetTy, bool isTailCall,
                      SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);
  };
}

std::vector<SDOperand>
X86TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
  if (F.getCallingConv() == CallingConv::Fast && EnableFastCC)
    return LowerFastCCArguments(F, DAG);
  return LowerCCCArguments(F, DAG);
}

std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy,
                               bool isVarArg, unsigned CallingConv,
                               bool isTailCall,
                               SDOperand Callee, ArgListTy &Args,
                               SelectionDAG &DAG) {
  assert((!isVarArg || CallingConv == CallingConv::C) &&
         "Only C takes varargs!");
  if (CallingConv == CallingConv::Fast && EnableFastCC)
    return LowerFastCCCallTo(Chain, RetTy, isTailCall, Callee, Args, DAG);
  return LowerCCCCallTo(Chain, RetTy, isVarArg, isTailCall, Callee, Args, DAG);
}

//===----------------------------------------------------------------------===//
// C Calling Convention implementation
//===----------------------------------------------------------------------===//

std::vector<SDOperand>
X86TargetLowering::LowerCCCArguments(Function &F, SelectionDAG &DAG) {
  std::vector<SDOperand> ArgValues;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // Add DAG nodes to load the arguments... On entry to a function on the X86,
  // the stack frame looks like this:
  //
  // [ESP] -- return address
  // [ESP + 4] -- first argument (leftmost lexically)
  // [ESP + 8] -- second argument, if first argument is four bytes in size
  // ...
  //
  unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    MVT::ValueType ObjectVT = getValueType(I->getType());
    unsigned ArgIncrement = 4;
    unsigned ObjSize;
    switch (ObjectVT) {
    default: assert(0 && "Unhandled argument type!");
    case MVT::i1:
    case MVT::i8: ObjSize = 1; break;
    case MVT::i16: ObjSize = 2; break;
    case MVT::i32: ObjSize = 4; break;
    case MVT::i64: ObjSize = ArgIncrement = 8; break;
    case MVT::f32: ObjSize = 4; break;
    case MVT::f64: ObjSize = ArgIncrement = 8; break;
    }
    // Create the frame index object for this incoming parameter...
    int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);

    // Create the SelectionDAG nodes corresponding to a load from this parameter
    SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);

    // Don't codegen dead arguments. FIXME: remove this check when we can nuke
    // dead loads.
    SDOperand ArgValue;
    if (!I->use_empty())
      ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
                             DAG.getSrcValue(NULL));
    else {
      if (MVT::isInteger(ObjectVT))
        ArgValue = DAG.getConstant(0, ObjectVT);
      else
        ArgValue = DAG.getConstantFP(0, ObjectVT);
    }
    ArgValues.push_back(ArgValue);

    ArgOffset += ArgIncrement; // Move on to the next argument...
  }

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (F.isVarArg())
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
  ReturnAddrIndex = 0;    // No return address slot generated yet.
  BytesToPopOnReturn = 0; // Callee pops nothing.
  BytesCallerReserves = ArgOffset;

  // Finally, inform the code generator which regs we return values in.
  switch (getValueType(F.getReturnType())) {
  default: assert(0 && "Unknown type!");
  case MVT::isVoid: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    MF.addLiveOut(X86::EAX);
    break;
  case MVT::i64:
    MF.addLiveOut(X86::EAX);
    MF.addLiveOut(X86::EDX);
    break;
  case MVT::f32:
  case MVT::f64:
    MF.addLiveOut(X86::ST0);
    break;
  }
  return ArgValues;
}

std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerCCCCallTo(SDOperand Chain, const Type *RetTy,
                                  bool isVarArg, bool isTailCall,
                                  SDOperand Callee, ArgListTy &Args,
                                  SelectionDAG &DAG) {
  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;

  if (Args.empty()) {
    // Save zero bytes.
    Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
                        DAG.getConstant(0, getPointerTy()));
  } else {
    for (unsigned i = 0, e = Args.size(); i != e; ++i)
      switch (getValueType(Args[i].second)) {
      default: assert(0 && "Unknown value type!");
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
      case MVT::f32:
        NumBytes += 4;
        break;
      case MVT::i64:
      case MVT::f64:
        NumBytes += 8;
        break;
      }

    Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
                        DAG.getConstant(NumBytes, getPointerTy()));

    // Arguments go on the stack in reverse order, as specified by the ABI.
    unsigned ArgOffset = 0;
    SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32,
                                            DAG.getEntryNode());
    std::vector<SDOperand> Stores;

    for (unsigned i = 0, e = Args.size(); i != e; ++i) {
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);

      switch (getValueType(Args[i].second)) {
      default: assert(0 && "Unexpected ValueType for argument!");
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
        // Promote the integer to 32 bits. If the input type is signed use a
        // sign extend, otherwise use a zero extend.
        if (Args[i].second->isSigned())
          Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
        else
          Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);

        // FALL THROUGH
      case MVT::i32:
      case MVT::f32:
        Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                     Args[i].first, PtrOff,
                                     DAG.getSrcValue(NULL)));
        ArgOffset += 4;
        break;
      case MVT::i64:
      case MVT::f64:
        Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                     Args[i].first, PtrOff,
                                     DAG.getSrcValue(NULL)));
        ArgOffset += 8;
        break;
      }
    }
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
  }

  std::vector<MVT::ValueType> RetVals;
  MVT::ValueType RetTyVT = getValueType(RetTy);
  RetVals.push_back(MVT::Other);

  // The result values produced have to be legal. Promote the result.
  switch (RetTyVT) {
  case MVT::isVoid: break;
  default:
    RetVals.push_back(RetTyVT);
    break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    RetVals.push_back(MVT::i32);
    break;
  case MVT::f32:
    if (X86ScalarSSE)
      RetVals.push_back(MVT::f32);
    else
      RetVals.push_back(MVT::f64);
    break;
  case MVT::i64:
    RetVals.push_back(MVT::i32);
    RetVals.push_back(MVT::i32);
    break;
  }
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(0, getPointerTy()));
  SDOperand TheCall = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                                  RetVals, Ops);
  Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, TheCall);

  SDOperand ResultVal;
  switch (RetTyVT) {
  case MVT::isVoid: break;
  default:
    ResultVal = TheCall.getValue(1);
    break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    ResultVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, TheCall.getValue(1));
    break;
  case MVT::f32:
    // FIXME: we would really like to remember that this FP_ROUND operation is
    // okay to eliminate if we allow excess FP precision.
    ResultVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, TheCall.getValue(1));
    break;
  case MVT::i64:
    ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, TheCall.getValue(1),
                            TheCall.getValue(2));
    break;
  }

  return std::make_pair(ResultVal, Chain);
}

SDOperand
X86TargetLowering::LowerVAStart(SDOperand Chain, SDOperand VAListP,
                                Value *VAListV, SelectionDAG &DAG) {
  // vastart just stores the address of the VarArgsFrameIndex slot.
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
  return DAG.getNode(ISD::STORE, MVT::Other, Chain, FR, VAListP,
                     DAG.getSrcValue(VAListV));
}


std::pair<SDOperand,SDOperand>
X86TargetLowering::LowerVAArg(SDOperand Chain, SDOperand VAListP,
                              Value *VAListV, const Type *ArgTy,
                              SelectionDAG &DAG) {
  MVT::ValueType ArgVT = getValueType(ArgTy);
  SDOperand Val = DAG.getLoad(MVT::i32, Chain,
                              VAListP, DAG.getSrcValue(VAListV));
  SDOperand Result = DAG.getLoad(ArgVT, Chain, Val,
                                 DAG.getSrcValue(NULL));
  unsigned Amt;
  if (ArgVT == MVT::i32)
    Amt = 4;
  else {
    assert((ArgVT == MVT::i64 || ArgVT == MVT::f64) &&
           "Other types should have been promoted for varargs!");
    Amt = 8;
  }
  Val = DAG.getNode(ISD::ADD, Val.getValueType(), Val,
                    DAG.getConstant(Amt, Val.getValueType()));
  Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain,
                      Val, VAListP, DAG.getSrcValue(VAListV));
  return std::make_pair(Result, Chain);
}

//===----------------------------------------------------------------------===//
// Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//
// The X86 'fast' calling convention passes up to two integer arguments in
// registers (an appropriate portion of EAX/EDX), passes arguments in C order,
// requires that the callee pop its arguments off the stack (allowing proper
// tail calls), and has the same return value conventions as the C calling
// convention.
//
// This calling convention always arranges for the callee pop value to be 8n+4
// bytes, which is needed for tail recursion elimination and stack alignment
// reasons.
//
// Note that this can be enhanced in the future to pass fp vals in registers
// (when we have a global fp allocator) and do other tricks.
//
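// For example, under this convention 'fastcc void f(int a, int b, int c)'
// passes 'a' in EAX, 'b' in EDX, and 'c' on the stack, and the callee pops the
// stack argument area (padded to 8n+4 bytes) when it returns.
//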

/// AddLiveIn - This helper function adds the specified physical register to the
/// MachineFunction as a live in value. It also creates a corresponding virtual
/// register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
  MF.addLiveIn(PReg, VReg);
  return VReg;
}


std::vector<SDOperand>
X86TargetLowering::LowerFastCCArguments(Function &F, SelectionDAG &DAG) {
  std::vector<SDOperand> ArgValues;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // Add DAG nodes to load the arguments... On entry to a function the stack
  // frame looks like this:
  //
  // [ESP] -- return address
  // [ESP + 4] -- first nonreg argument (leftmost lexically)
  // [ESP + 8] -- second nonreg argument, if first argument is 4 bytes in size
  // ...
  unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot

  // Keep track of the number of integer regs passed so far. This can be either
  // 0 (neither EAX nor EDX used), 1 (EAX is used), or 2 (EAX and EDX are both
  // used).
  unsigned NumIntRegs = 0;

  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    MVT::ValueType ObjectVT = getValueType(I->getType());
    unsigned ArgIncrement = 4;
    unsigned ObjSize = 0;
    SDOperand ArgValue;

    switch (ObjectVT) {
    default: assert(0 && "Unhandled argument type!");
    case MVT::i1:
    case MVT::i8:
      if (NumIntRegs < 2) {
        if (!I->use_empty()) {
          unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
                                    X86::R8RegisterClass);
          ArgValue = DAG.getCopyFromReg(VReg, MVT::i8, DAG.getRoot());
          DAG.setRoot(ArgValue.getValue(1));
        }
        ++NumIntRegs;
        break;
      }

      ObjSize = 1;
      break;
    case MVT::i16:
      if (NumIntRegs < 2) {
        if (!I->use_empty()) {
          unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
                                    X86::R16RegisterClass);
          ArgValue = DAG.getCopyFromReg(VReg, MVT::i16, DAG.getRoot());
          DAG.setRoot(ArgValue.getValue(1));
        }
        ++NumIntRegs;
        break;
      }
      ObjSize = 2;
      break;
    case MVT::i32:
      if (NumIntRegs < 2) {
        if (!I->use_empty()) {
          unsigned VReg = AddLiveIn(MF,NumIntRegs ? X86::EDX : X86::EAX,
                                    X86::R32RegisterClass);
          ArgValue = DAG.getCopyFromReg(VReg, MVT::i32, DAG.getRoot());
          DAG.setRoot(ArgValue.getValue(1));
        }
        ++NumIntRegs;
        break;
      }
      ObjSize = 4;
      break;
    case MVT::i64:
      if (NumIntRegs == 0) {
        if (!I->use_empty()) {
          unsigned BotReg = AddLiveIn(MF, X86::EAX, X86::R32RegisterClass);
          unsigned TopReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);

          SDOperand Low=DAG.getCopyFromReg(BotReg, MVT::i32, DAG.getRoot());
          SDOperand Hi =DAG.getCopyFromReg(TopReg, MVT::i32, Low.getValue(1));
          DAG.setRoot(Hi.getValue(1));

          ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
        }
        NumIntRegs = 2;
        break;
      } else if (NumIntRegs == 1) {
        if (!I->use_empty()) {
          unsigned BotReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
          SDOperand Low = DAG.getCopyFromReg(BotReg, MVT::i32, DAG.getRoot());
          DAG.setRoot(Low.getValue(1));

          // Load the high part from memory.
          // Create the frame index object for this incoming parameter...
          int FI = MFI->CreateFixedObject(4, ArgOffset);
          SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
          SDOperand Hi = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN,
                                     DAG.getSrcValue(NULL));
          ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
        }
        ArgOffset += 4;
        NumIntRegs = 2;
        break;
      }
      ObjSize = ArgIncrement = 8;
      break;
    case MVT::f32: ObjSize = 4; break;
    case MVT::f64: ObjSize = ArgIncrement = 8; break;
    }

    // Don't codegen dead arguments. FIXME: remove this check when we can nuke
    // dead loads.
    if (ObjSize && !I->use_empty()) {
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);

      // Create the SelectionDAG nodes corresponding to a load from this
      // parameter.
      SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);

      ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
                             DAG.getSrcValue(NULL));
    } else if (ArgValue.Val == 0) {
      if (MVT::isInteger(ObjectVT))
        ArgValue = DAG.getConstant(0, ObjectVT);
      else
        ArgValue = DAG.getConstantFP(0, ObjectVT);
    }
    ArgValues.push_back(ArgValue);

    if (ObjSize)
      ArgOffset += ArgIncrement; // Move on to the next argument.
  }

  // Make sure the incoming argument area occupies 8n+4 bytes, so that once the
  // 4-byte return address is pushed the whole area is a multiple of 8 and the
  // arguments stay 8-byte aligned.
  if ((ArgOffset & 7) == 0)
    ArgOffset += 4;
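  // For example, 8 bytes of stack arguments become a 12-byte pop value here;
  // together with the 4-byte return address that is 16 bytes, keeping the
  // stack 8-byte aligned across the call.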

  VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
  ReturnAddrIndex = 0;           // No return address slot generated yet.
  BytesToPopOnReturn = ArgOffset; // Callee pops all stack arguments.
  BytesCallerReserves = 0;

  // Finally, inform the code generator which regs we return values in.
  switch (getValueType(F.getReturnType())) {
  default: assert(0 && "Unknown type!");
  case MVT::isVoid: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    MF.addLiveOut(X86::EAX);
    break;
  case MVT::i64:
    MF.addLiveOut(X86::EAX);
    MF.addLiveOut(X86::EDX);
    break;
  case MVT::f32:
  case MVT::f64:
    MF.addLiveOut(X86::ST0);
    break;
  }
  return ArgValues;
}

std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerFastCCCallTo(SDOperand Chain, const Type *RetTy,
                                     bool isTailCall, SDOperand Callee,
                                     ArgListTy &Args, SelectionDAG &DAG) {
  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;

  // Keep track of the number of integer regs passed so far. This can be either
  // 0 (neither EAX nor EDX used), 1 (EAX is used), or 2 (EAX and EDX are both
  // used).
  unsigned NumIntRegs = 0;

  for (unsigned i = 0, e = Args.size(); i != e; ++i)
    switch (getValueType(Args[i].second)) {
    default: assert(0 && "Unknown value type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (NumIntRegs < 2) {
        ++NumIntRegs;
        break;
      }
      // fall through
    case MVT::f32:
      NumBytes += 4;
      break;
    case MVT::i64:
      if (NumIntRegs == 0) {
        NumIntRegs = 2;
        break;
      } else if (NumIntRegs == 1) {
        NumIntRegs = 2;
        NumBytes += 4;
        break;
      }

      // fall through
    case MVT::f64:
      NumBytes += 8;
      break;
    }

  // Make sure the outgoing argument area occupies 8n+4 bytes, so that once the
  // 4-byte return address is pushed the whole area is a multiple of 8 and the
  // arguments stay 8-byte aligned.
  if ((NumBytes & 7) == 0)
    NumBytes += 4;

  Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
                      DAG.getConstant(NumBytes, getPointerTy()));

  // Arguments go on the stack in reverse order, as specified by the ABI.
  unsigned ArgOffset = 0;
  SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32,
                                          DAG.getEntryNode());
  NumIntRegs = 0;
  std::vector<SDOperand> Stores;
  std::vector<SDOperand> RegValuesToPass;
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    switch (getValueType(Args[i].second)) {
    default: assert(0 && "Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (NumIntRegs < 2) {
        RegValuesToPass.push_back(Args[i].first);
        ++NumIntRegs;
        break;
      }
      // Fall through
    case MVT::f32: {
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
      Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                   Args[i].first, PtrOff,
                                   DAG.getSrcValue(NULL)));
      ArgOffset += 4;
      break;
    }
    case MVT::i64:
      if (NumIntRegs < 2) { // Can pass part of it in regs?
        SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
                                   Args[i].first, DAG.getConstant(1, MVT::i32));
        SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
                                   Args[i].first, DAG.getConstant(0, MVT::i32));
        RegValuesToPass.push_back(Lo);
        ++NumIntRegs;
        if (NumIntRegs < 2) { // Pass both parts in regs?
          RegValuesToPass.push_back(Hi);
          ++NumIntRegs;
        } else {
          // Pass the high part in memory.
          SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
          PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
          Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                       Hi, PtrOff, DAG.getSrcValue(NULL)));
          ArgOffset += 4;
        }
        break;
      }
      // Fall through
    case MVT::f64:
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
      Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                   Args[i].first, PtrOff,
                                   DAG.getSrcValue(NULL)));
      ArgOffset += 8;
      break;
    }
  }
  if (!Stores.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);

  // Make sure the outgoing argument area occupies 8n+4 bytes, so that once the
  // 4-byte return address is pushed the whole area is a multiple of 8 and the
  // arguments stay 8-byte aligned.
  if ((ArgOffset & 7) == 0)
    ArgOffset += 4;

  std::vector<MVT::ValueType> RetVals;
  MVT::ValueType RetTyVT = getValueType(RetTy);

  RetVals.push_back(MVT::Other);

  // The result values produced have to be legal. Promote the result.
  switch (RetTyVT) {
  case MVT::isVoid: break;
  default:
    RetVals.push_back(RetTyVT);
    break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    RetVals.push_back(MVT::i32);
    break;
  case MVT::f32:
    if (X86ScalarSSE)
      RetVals.push_back(MVT::f32);
    else
      RetVals.push_back(MVT::f64);
    break;
  case MVT::i64:
    RetVals.push_back(MVT::i32);
    RetVals.push_back(MVT::i32);
    break;
  }

  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
  // Callee pops all arg values on the stack.
  Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));

  // Pass register arguments as needed.
  Ops.insert(Ops.end(), RegValuesToPass.begin(), RegValuesToPass.end());

  SDOperand TheCall = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                                  RetVals, Ops);
  Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, TheCall);

  SDOperand ResultVal;
  switch (RetTyVT) {
  case MVT::isVoid: break;
  default:
    ResultVal = TheCall.getValue(1);
    break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    ResultVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, TheCall.getValue(1));
    break;
  case MVT::f32:
    // FIXME: we would really like to remember that this FP_ROUND operation is
    // okay to eliminate if we allow excess FP precision.
    ResultVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, TheCall.getValue(1));
    break;
  case MVT::i64:
    ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, TheCall.getValue(1),
                            TheCall.getValue(2));
    break;
  }

  return std::make_pair(ResultVal, Chain);
}

SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    MachineFunction &MF = DAG.getMachineFunction();
    ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
}



std::pair<SDOperand, SDOperand> X86TargetLowering::
LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
                        SelectionDAG &DAG) {
  SDOperand Result;
  if (Depth) // Depths > 0 not supported yet!
    Result = DAG.getConstant(0, getPointerTy());
  else {
    SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
    if (!isFrameAddress)
      // Just load the return address
      Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(), RetAddrFI,
                           DAG.getSrcValue(NULL));
    else
      Result = DAG.getNode(ISD::SUB, MVT::i32, RetAddrFI,
                           DAG.getConstant(4, MVT::i32));
  }
  return std::make_pair(Result, Chain);
}
Chris Lattner8acb1ba2005-01-07 07:49:41 +0000947
Chris Lattnera28381c2005-07-16 00:28:20 +0000948//===----------------------------------------------------------------------===//
949// X86 Custom Lowering Hooks
950//===----------------------------------------------------------------------===//
951
Chris Lattner67649df2005-05-14 06:52:07 +0000952/// LowerOperation - Provide custom lowering hooks for some operations.
953///
954SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
955 switch (Op.getOpcode()) {
956 default: assert(0 && "Should not custom lower this!");
Chris Lattner745d5382005-07-29 00:40:01 +0000957 case ISD::SINT_TO_FP: {
Chris Lattner67649df2005-05-14 06:52:07 +0000958 assert(Op.getValueType() == MVT::f64 &&
959 Op.getOperand(0).getValueType() == MVT::i64 &&
960 "Unknown SINT_TO_FP to lower!");
961 // We lower sint64->FP into a store to a temporary stack slot, followed by a
962 // FILD64m node.
963 MachineFunction &MF = DAG.getMachineFunction();
964 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
965 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
966 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, DAG.getEntryNode(),
967 Op.getOperand(0), StackSlot, DAG.getSrcValue(NULL));
968 std::vector<MVT::ValueType> RTs;
969 RTs.push_back(MVT::f64);
970 RTs.push_back(MVT::Other);
971 std::vector<SDOperand> Ops;
972 Ops.push_back(Store);
973 Ops.push_back(StackSlot);
974 return DAG.getNode(X86ISD::FILD64m, RTs, Ops);
975 }
Chris Lattner745d5382005-07-29 00:40:01 +0000976 case ISD::FP_TO_SINT: {
977 assert(Op.getValueType() == MVT::i64 &&
978 Op.getOperand(0).getValueType() == MVT::f64 &&
979 "Unknown FP_TO_SINT to lower!");
980 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
981 // stack slot.
982 MachineFunction &MF = DAG.getMachineFunction();
983 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
984 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
985
986 // Build the FISTP64
987 std::vector<SDOperand> Ops;
988 Ops.push_back(DAG.getEntryNode());
989 Ops.push_back(Op.getOperand(0));
990 Ops.push_back(StackSlot);
991 SDOperand FISTP = DAG.getNode(X86ISD::FISTP64m, MVT::Other, Ops);
992
993 // Load the result.
994 return DAG.getLoad(MVT::i64, FISTP, StackSlot, DAG.getSrcValue(NULL));
995 }
996 }
Chris Lattner67649df2005-05-14 06:52:07 +0000997}


//===----------------------------------------------------------------------===//
// Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDOperand's instead of register numbers for the leaves of the matched
  /// tree.
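  ///
  /// Roughly speaking, this models an x86 effective address of the form
  ///   Base + IndexReg*Scale + Disp
  /// where the base is either a register or a frame index, optionally relative
  /// to a global value (GV).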
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase,
    } BaseType;

    struct { // This is really a union, discriminated by BaseType!
      SDOperand Reg;
      int FrameIndex;
    } Base;

    unsigned Scale;
    SDOperand IndexReg;
    unsigned Disp;
    GlobalValue *GV;

    X86ISelAddressMode()
      : BaseType(RegBase), Scale(1), IndexReg(), Disp(), GV(0) {
    }
  };
}


namespace {
  Statistic<>
  NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");

  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class ISel : public SelectionDAGISel {
    /// ContainsFPCode - Every instruction we select that uses or defines a FP
    /// register should set this to true.
    bool ContainsFPCode;

    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    X86TargetLowering X86Lowering;

    /// RegPressureMap - This keeps an approximate count of the number of
    /// registers required to evaluate each node in the graph.
    std::map<SDNode*, unsigned> RegPressureMap;

    /// ExprMap - As shared expressions are codegen'd, we keep track of which
    /// vreg the value is produced in, so we only emit one copy of each compiled
    /// tree.
    std::map<SDOperand, unsigned> ExprMap;

    /// TheDAG - The DAG being selected during Select* operations.
    SelectionDAG *TheDAG;

    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;
  public:
    ISel(TargetMachine &TM) : SelectionDAGISel(X86Lowering), X86Lowering(TM) {
      Subtarget = TM.getSubtarget<const X86Subtarget>();
    }

    virtual const char *getPassName() const {
      return "X86 Pattern Instruction Selection";
    }

    unsigned getRegPressure(SDOperand O) {
      return RegPressureMap[O.Val];
    }
    unsigned ComputeRegPressure(SDOperand O);

    /// InstructionSelectBasicBlock - This callback is invoked by
    /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
    virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);

    virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);

    bool isFoldableLoad(SDOperand Op, SDOperand OtherOp,
                        bool FloatPromoteOk = false);
    void EmitFoldedLoad(SDOperand Op, X86AddressMode &AM);
    bool TryToFoldLoadOpStore(SDNode *Node);
    bool EmitOrOpOp(SDOperand Op1, SDOperand Op2, unsigned DestReg);
    void EmitCMP(SDOperand LHS, SDOperand RHS, bool isOnlyUse);
    bool EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain, SDOperand Cond);
    void EmitSelectCC(SDOperand Cond, MVT::ValueType SVT,
                      unsigned RTrue, unsigned RFalse, unsigned RDest);
    unsigned SelectExpr(SDOperand N);

    X86AddressMode SelectAddrExprs(const X86ISelAddressMode &IAM);
    bool MatchAddress(SDOperand N, X86ISelAddressMode &AM);
    void SelectAddress(SDOperand N, X86AddressMode &AM);
    bool EmitPotentialTailCall(SDNode *Node);
    void EmitFastCCToFastCCTailCall(SDNode *TailCallNode);
    void Select(SDOperand N);
  };
}

/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
static void EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                   MachineFrameInfo *MFI) {
  // Switch the FPU to 64-bit precision mode for better compatibility and speed.
  int CWFrameIdx = MFI->CreateStackObject(2, 2);
  addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);

  // Set the high part to be 64-bit precision.
  addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
                    CWFrameIdx, 1).addImm(2);

  // Reload the modified control word now.
  addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
}

void ISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
  // If this function has live-in values, emit the copies from pregs to vregs at
  // the top of the function, before anything else.
  MachineBasicBlock *BB = MF.begin();
  if (MF.livein_begin() != MF.livein_end()) {
    SSARegMap *RegMap = MF.getSSARegMap();
    for (MachineFunction::livein_iterator LI = MF.livein_begin(),
         E = MF.livein_end(); LI != E; ++LI) {
      const TargetRegisterClass *RC = RegMap->getRegClass(LI->second);
      if (RC == X86::R8RegisterClass) {
        BuildMI(BB, X86::MOV8rr, 1, LI->second).addReg(LI->first);
      } else if (RC == X86::R16RegisterClass) {
        BuildMI(BB, X86::MOV16rr, 1, LI->second).addReg(LI->first);
      } else if (RC == X86::R32RegisterClass) {
        BuildMI(BB, X86::MOV32rr, 1, LI->second).addReg(LI->first);
      } else if (RC == X86::RFPRegisterClass) {
        BuildMI(BB, X86::FpMOV, 1, LI->second).addReg(LI->first);
      } else if (RC == X86::RXMMRegisterClass) {
        BuildMI(BB, X86::MOVAPDrr, 1, LI->second).addReg(LI->first);
      } else {
        assert(0 && "Unknown regclass!");
      }
    }
  }


  // If this is main, emit special code for main.
  if (Fn.hasExternalLinkage() && Fn.getName() == "main")
    EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}


/// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
void ISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
  // While we're doing this, keep track of whether we see any FP code for
  // FP_REG_KILL insertion.
  ContainsFPCode = false;
  MachineFunction *MF = BB->getParent();

  // Scan the PHI nodes that already are inserted into this basic block. If any
  // of them is a PHI of a floating point value, we need to insert an
  // FP_REG_KILL.
  SSARegMap *RegMap = MF->getSSARegMap();
  if (BB != MF->begin())
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      assert(I->getOpcode() == X86::PHI &&
             "Isn't just PHI nodes?");
      if (RegMap->getRegClass(I->getOperand(0).getReg()) ==
          X86::RFPRegisterClass) {
        ContainsFPCode = true;
        break;
      }
    }

  // Compute the RegPressureMap, which is an approximation for the number of
  // registers required to compute each node.
  ComputeRegPressure(DAG.getRoot());

  TheDAG = &DAG;

  // Codegen the basic block.
  Select(DAG.getRoot());

  TheDAG = 0;

  // Finally, look at all of the successors of this block. If any contain a PHI
  // node of FP type, we need to insert an FP_REG_KILL in this block.
  for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
       E = BB->succ_end(); SI != E && !ContainsFPCode; ++SI)
    for (MachineBasicBlock::iterator I = (*SI)->begin(), E = (*SI)->end();
         I != E && I->getOpcode() == X86::PHI; ++I) {
      if (RegMap->getRegClass(I->getOperand(0).getReg()) ==
          X86::RFPRegisterClass) {
        ContainsFPCode = true;
        break;
      }
    }

  // Final check, check LLVM BB's that are successors to the LLVM BB
  // corresponding to BB for FP PHI nodes.
  const BasicBlock *LLVMBB = BB->getBasicBlock();
  const PHINode *PN;
  if (!ContainsFPCode)
    for (succ_const_iterator SI = succ_begin(LLVMBB), E = succ_end(LLVMBB);
         SI != E && !ContainsFPCode; ++SI)
      for (BasicBlock::const_iterator II = SI->begin();
           (PN = dyn_cast<PHINode>(II)); ++II)
        if (PN->getType()->isFloatingPoint()) {
          ContainsFPCode = true;
          break;
        }


  // Insert FP_REG_KILL instructions into basic blocks that need them. This
  // only occurs due to the floating point stackifier not being aggressive
  // enough to handle arbitrary global stackification.
  //
  // Currently we insert an FP_REG_KILL instruction into each block that uses or
  // defines a floating point virtual register.
  //
  // When the global register allocators (like linear scan) finally update live
  // variable analysis, we can keep floating point values in registers across
  // basic blocks. This will be a huge win, but we are waiting on the global
  // allocators before we can do this.
  //
  if (ContainsFPCode) {
    BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
    ++NumFPKill;
  }

  // Clear state used for selection.
  ExprMap.clear();
  RegPressureMap.clear();
}


// ComputeRegPressure - Compute the RegPressureMap, which is an approximation
// for the number of registers required to compute each node. This is basically
// computing a generalized form of the Sethi-Ullman number for each node.
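//
// For example, for an expression like (a+b)*(c+d), each add needs roughly two
// registers; the multiply's two operands tie at that maximum, so it is
// assigned roughly three (MaxRegUse plus one extra max-pressure operand).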
1240unsigned ISel::ComputeRegPressure(SDOperand O) {
1241 SDNode *N = O.Val;
1242 unsigned &Result = RegPressureMap[N];
1243 if (Result) return Result;
1244
Chris Lattnera3aa2e22005-01-11 03:37:59 +00001245  // FIXME: Should operations like CALL (which clobber lots of regs) have a
1246 // higher fixed cost??
1247
Chris Lattnerc4b6a782005-01-11 22:29:12 +00001248 if (N->getNumOperands() == 0) {
1249 Result = 1;
1250 } else {
1251 unsigned MaxRegUse = 0;
1252 unsigned NumExtraMaxRegUsers = 0;
1253 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
1254 unsigned Regs;
1255 if (N->getOperand(i).getOpcode() == ISD::Constant)
1256 Regs = 0;
1257 else
1258 Regs = ComputeRegPressure(N->getOperand(i));
1259 if (Regs > MaxRegUse) {
1260 MaxRegUse = Regs;
1261 NumExtraMaxRegUsers = 0;
1262 } else if (Regs == MaxRegUse &&
1263 N->getOperand(i).getValueType() != MVT::Other) {
1264 ++NumExtraMaxRegUsers;
1265 }
Chris Lattner11333092005-01-11 03:11:44 +00001266 }
Chris Lattner90d1be72005-01-17 22:56:09 +00001267
1268 if (O.getOpcode() != ISD::TokenFactor)
1269 Result = MaxRegUse+NumExtraMaxRegUsers;
1270 else
Chris Lattner869e0432005-01-17 23:02:13 +00001271 Result = MaxRegUse == 1 ? 0 : MaxRegUse-1;
Chris Lattnerc4b6a782005-01-11 22:29:12 +00001272 }
Chris Lattnerafce4302005-01-12 02:19:06 +00001273
Chris Lattner837caa72005-01-11 23:21:30 +00001274 //std::cerr << " WEIGHT: " << Result << " "; N->dump(); std::cerr << "\n";
Chris Lattnerc4b6a782005-01-11 22:29:12 +00001275 return Result;
Chris Lattner11333092005-01-11 03:11:44 +00001276}
1277
Chris Lattnerbf52d492005-01-20 16:50:16 +00001278/// NodeTransitivelyUsesValue - Return true if N is Op or N transitively uses Op through its operands.
1279/// The DAG cannot have cycles in it, by definition, so the visited set is not
1280/// needed to prevent infinite loops. The DAG CAN, however, have unbounded
1281/// reuse, so it prevents exponential cases.
1282///
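/// For example, a DAG containing a long chain of diamond-shaped reuses would
/// otherwise be walked once per path; memoizing visited nodes keeps the walk
/// linear in the number of edges rather than exponential in the reuse depth.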
1283static bool NodeTransitivelyUsesValue(SDOperand N, SDOperand Op,
1284 std::set<SDNode*> &Visited) {
1285 if (N == Op) return true; // Found it.
1286 SDNode *Node = N.Val;
Chris Lattnerfb0f53f2005-01-21 21:43:02 +00001287 if (Node->getNumOperands() == 0 || // Leaf?
1288 Node->getNodeDepth() <= Op.getNodeDepth()) return false; // Can't find it?
Chris Lattnerbf52d492005-01-20 16:50:16 +00001289 if (!Visited.insert(Node).second) return false; // Already visited?
1290
1291  // Recurse for operands 1..N-1; operand 0 is handled by the tail recursion below.
1292 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
1293 if (NodeTransitivelyUsesValue(Node->getOperand(i), Op, Visited))
1294 return true;
1295
1296  // Tail recurse for operand 0.
1297 return NodeTransitivelyUsesValue(Node->getOperand(0), Op, Visited);
1298}
1299
Chris Lattner98a8ba02005-01-18 01:06:26 +00001300X86AddressMode ISel::SelectAddrExprs(const X86ISelAddressMode &IAM) {
1301 X86AddressMode Result;
1302
1303 // If we need to emit two register operands, emit the one with the highest
1304 // register pressure first.
1305 if (IAM.BaseType == X86ISelAddressMode::RegBase &&
1306 IAM.Base.Reg.Val && IAM.IndexReg.Val) {
Chris Lattnerbf52d492005-01-20 16:50:16 +00001307 bool EmitBaseThenIndex;
Chris Lattner98a8ba02005-01-18 01:06:26 +00001308 if (getRegPressure(IAM.Base.Reg) > getRegPressure(IAM.IndexReg)) {
Chris Lattnerbf52d492005-01-20 16:50:16 +00001309 std::set<SDNode*> Visited;
1310 EmitBaseThenIndex = true;
1311      // If Base ends up pointing to Index, we must emit the index first;
1312      // because of the way we fold loads, emitting the base first could do
1313      // bad things with the folded add.
1314 if (NodeTransitivelyUsesValue(IAM.Base.Reg, IAM.IndexReg, Visited))
1315 EmitBaseThenIndex = false;
1316 } else {
1317 std::set<SDNode*> Visited;
1318 EmitBaseThenIndex = false;
1319      // Likewise, if Index ends up pointing to Base, we must emit the base
1320      // first; because of the way we fold loads, emitting the index first
1321      // could do bad things with the folded add.
1322 if (NodeTransitivelyUsesValue(IAM.IndexReg, IAM.Base.Reg, Visited))
1323 EmitBaseThenIndex = true;
1324 }
1325
1326 if (EmitBaseThenIndex) {
Chris Lattner98a8ba02005-01-18 01:06:26 +00001327 Result.Base.Reg = SelectExpr(IAM.Base.Reg);
1328 Result.IndexReg = SelectExpr(IAM.IndexReg);
1329 } else {
1330 Result.IndexReg = SelectExpr(IAM.IndexReg);
1331 Result.Base.Reg = SelectExpr(IAM.Base.Reg);
1332 }
Chris Lattnerbf52d492005-01-20 16:50:16 +00001333
Chris Lattner98a8ba02005-01-18 01:06:26 +00001334 } else if (IAM.BaseType == X86ISelAddressMode::RegBase && IAM.Base.Reg.Val) {
1335 Result.Base.Reg = SelectExpr(IAM.Base.Reg);
1336 } else if (IAM.IndexReg.Val) {
1337 Result.IndexReg = SelectExpr(IAM.IndexReg);
1338 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00001339
Chris Lattner98a8ba02005-01-18 01:06:26 +00001340 switch (IAM.BaseType) {
1341 case X86ISelAddressMode::RegBase:
1342 Result.BaseType = X86AddressMode::RegBase;
1343 break;
1344 case X86ISelAddressMode::FrameIndexBase:
1345 Result.BaseType = X86AddressMode::FrameIndexBase;
1346 Result.Base.FrameIndex = IAM.Base.FrameIndex;
1347 break;
1348 default:
1349 assert(0 && "Unknown base type!");
1350 break;
1351 }
1352 Result.Scale = IAM.Scale;
1353 Result.Disp = IAM.Disp;
1354 Result.GV = IAM.GV;
1355 return Result;
1356}
1357
1358/// SelectAddress - Pattern match the maximal addressing mode for this node and
1359/// emit all of the leaf registers.
1360void ISel::SelectAddress(SDOperand N, X86AddressMode &AM) {
1361 X86ISelAddressMode IAM;
1362 MatchAddress(N, IAM);
1363 AM = SelectAddrExprs(IAM);
1364}
1365
1366/// MatchAddress - Add the specified node to the specified addressing mode,
1367/// returning true if it cannot be done. This only pattern matches for the
1368/// addressing mode; it does not cause any code to be emitted. For that, use
1369/// SelectAddress.
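///
/// The mode being matched is the general x86 form Base + Scale*Index + Disp
/// (optionally relative to a global), so, for example, a DAG computing
/// A + 4*i + 8 can collapse into the single memory operand [A + i*4 + 8].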
1370bool ISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM) {
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001371 switch (N.getOpcode()) {
1372 default: break;
1373 case ISD::FrameIndex:
Chris Lattner98a8ba02005-01-18 01:06:26 +00001374 if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base.Reg.Val == 0) {
1375 AM.BaseType = X86ISelAddressMode::FrameIndexBase;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001376 AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
1377 return false;
1378 }
1379 break;
1380 case ISD::GlobalAddress:
1381 if (AM.GV == 0) {
Nate Begemanfb5792f2005-07-12 01:41:54 +00001382 GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
1383 // For Darwin, external and weak symbols are indirect, so we want to load
1384 // the value at address GV, not the value of GV itself. This means that
1385 // the GlobalAddress must be in the base or index register of the address,
1386 // not the GV offset field.
Jeff Cohen00b168892005-07-27 06:12:32 +00001387 if (Subtarget->getIndirectExternAndWeakGlobals() &&
Nate Begemanfb5792f2005-07-12 01:41:54 +00001388 (GV->hasWeakLinkage() || GV->isExternal())) {
1389 break;
1390 } else {
1391 AM.GV = GV;
1392 return false;
1393 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001394 }
1395 break;
1396 case ISD::Constant:
1397 AM.Disp += cast<ConstantSDNode>(N)->getValue();
1398 return false;
1399 case ISD::SHL:
Chris Lattner636e79a2005-01-13 05:53:16 +00001400 // We might have folded the load into this shift, so don't regen the value
1401 // if so.
1402 if (ExprMap.count(N)) break;
1403
Chris Lattner98a8ba02005-01-18 01:06:26 +00001404 if (AM.IndexReg.Val == 0 && AM.Scale == 1)
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001405 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
1406 unsigned Val = CN->getValue();
1407 if (Val == 1 || Val == 2 || Val == 3) {
1408 AM.Scale = 1 << Val;
Chris Lattner51a26342005-01-11 06:36:20 +00001409 SDOperand ShVal = N.Val->getOperand(0);
1410
1411 // Okay, we know that we have a scale by now. However, if the scaled
1412 // value is an add of something and a constant, we can fold the
1413 // constant into the disp field here.
Chris Lattner811482a2005-01-18 04:18:32 +00001414 if (ShVal.Val->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
Chris Lattner51a26342005-01-11 06:36:20 +00001415 isa<ConstantSDNode>(ShVal.Val->getOperand(1))) {
Chris Lattner98a8ba02005-01-18 01:06:26 +00001416 AM.IndexReg = ShVal.Val->getOperand(0);
Chris Lattner51a26342005-01-11 06:36:20 +00001417 ConstantSDNode *AddVal =
1418 cast<ConstantSDNode>(ShVal.Val->getOperand(1));
1419 AM.Disp += AddVal->getValue() << Val;
Chris Lattner636e79a2005-01-13 05:53:16 +00001420 } else {
Chris Lattner98a8ba02005-01-18 01:06:26 +00001421 AM.IndexReg = ShVal;
Chris Lattner51a26342005-01-11 06:36:20 +00001422 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001423 return false;
1424 }
1425 }
1426 break;
Chris Lattner947d5442005-01-11 19:37:02 +00001427 case ISD::MUL:
Chris Lattner636e79a2005-01-13 05:53:16 +00001428 // We might have folded the load into this mul, so don't regen the value if
1429 // so.
1430 if (ExprMap.count(N)) break;
1431
Chris Lattner947d5442005-01-11 19:37:02 +00001432 // X*[3,5,9] -> X+X*[2,4,8]
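    // For example, X*5 is matched as the address form X + X*4: the code below
    // uses X as both the base and the index register with Scale = 4.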
Chris Lattner98a8ba02005-01-18 01:06:26 +00001433 if (AM.IndexReg.Val == 0 && AM.BaseType == X86ISelAddressMode::RegBase &&
1434 AM.Base.Reg.Val == 0)
Chris Lattner947d5442005-01-11 19:37:02 +00001435 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
1436 if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
1437 AM.Scale = unsigned(CN->getValue())-1;
1438
1439 SDOperand MulVal = N.Val->getOperand(0);
Chris Lattner98a8ba02005-01-18 01:06:26 +00001440 SDOperand Reg;
Chris Lattner947d5442005-01-11 19:37:02 +00001441
1442 // Okay, we know that we have a scale by now. However, if the scaled
1443 // value is an add of something and a constant, we can fold the
1444 // constant into the disp field here.
Chris Lattner811482a2005-01-18 04:18:32 +00001445 if (MulVal.Val->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
Chris Lattner947d5442005-01-11 19:37:02 +00001446 isa<ConstantSDNode>(MulVal.Val->getOperand(1))) {
Chris Lattner98a8ba02005-01-18 01:06:26 +00001447 Reg = MulVal.Val->getOperand(0);
Chris Lattner947d5442005-01-11 19:37:02 +00001448 ConstantSDNode *AddVal =
1449 cast<ConstantSDNode>(MulVal.Val->getOperand(1));
1450 AM.Disp += AddVal->getValue() * CN->getValue();
Misha Brukman0e0a7a452005-04-21 23:38:14 +00001451 } else {
Chris Lattner98a8ba02005-01-18 01:06:26 +00001452 Reg = N.Val->getOperand(0);
Chris Lattner947d5442005-01-11 19:37:02 +00001453 }
1454
1455 AM.IndexReg = AM.Base.Reg = Reg;
1456 return false;
1457 }
1458 break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001459
1460 case ISD::ADD: {
Chris Lattner636e79a2005-01-13 05:53:16 +00001461    // We might have folded the load into this add, so don't regen the value if
1462 // so.
1463 if (ExprMap.count(N)) break;
1464
Chris Lattner98a8ba02005-01-18 01:06:26 +00001465 X86ISelAddressMode Backup = AM;
1466 if (!MatchAddress(N.Val->getOperand(0), AM) &&
1467 !MatchAddress(N.Val->getOperand(1), AM))
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001468 return false;
1469 AM = Backup;
Chris Lattner98a8ba02005-01-18 01:06:26 +00001470 if (!MatchAddress(N.Val->getOperand(1), AM) &&
1471 !MatchAddress(N.Val->getOperand(0), AM))
Chris Lattner9bbd9922005-01-12 18:08:53 +00001472 return false;
1473 AM = Backup;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001474 break;
1475 }
1476 }
1477
Chris Lattnera95589b2005-01-11 04:40:19 +00001478 // Is the base register already occupied?
Chris Lattner98a8ba02005-01-18 01:06:26 +00001479 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.Val) {
Chris Lattnera95589b2005-01-11 04:40:19 +00001480 // If so, check to see if the scale index register is set.
Chris Lattner98a8ba02005-01-18 01:06:26 +00001481 if (AM.IndexReg.Val == 0) {
1482 AM.IndexReg = N;
Chris Lattnera95589b2005-01-11 04:40:19 +00001483 AM.Scale = 1;
1484 return false;
1485 }
1486
1487 // Otherwise, we cannot select it.
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001488 return true;
Chris Lattnera95589b2005-01-11 04:40:19 +00001489 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001490
1491 // Default, generate it as a register.
Chris Lattner98a8ba02005-01-18 01:06:26 +00001492 AM.BaseType = X86ISelAddressMode::RegBase;
1493 AM.Base.Reg = N;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001494 return false;
1495}
1496
1497/// Emit2SetCCsAndLogical - Emit the following sequence of instructions,
1498/// assuming that the temporary registers are in the 8-bit register class.
1499///
1500/// Tmp1 = setcc1
1501/// Tmp2 = setcc2
1502/// DestReg = logicalop Tmp1, Tmp2
1503///
1504static void Emit2SetCCsAndLogical(MachineBasicBlock *BB, unsigned SetCC1,
1505 unsigned SetCC2, unsigned LogicalOp,
1506 unsigned DestReg) {
1507 SSARegMap *RegMap = BB->getParent()->getSSARegMap();
1508 unsigned Tmp1 = RegMap->createVirtualRegister(X86::R8RegisterClass);
1509 unsigned Tmp2 = RegMap->createVirtualRegister(X86::R8RegisterClass);
1510 BuildMI(BB, SetCC1, 0, Tmp1);
1511 BuildMI(BB, SetCC2, 0, Tmp2);
1512 BuildMI(BB, LogicalOp, 2, DestReg).addReg(Tmp1).addReg(Tmp2);
1513}
1514
1515/// EmitSetCC - Emit the code to set the specified 8-bit register to 1 if the
1516/// condition codes match the specified SetCCOpcode. Note that some conditions
1517/// require multiple instructions to generate the correct value.
1518static void EmitSetCC(MachineBasicBlock *BB, unsigned DestReg,
1519 ISD::CondCode SetCCOpcode, bool isFP) {
1520 unsigned Opc;
1521 if (!isFP) {
1522 switch (SetCCOpcode) {
1523 default: assert(0 && "Illegal integer SetCC!");
1524 case ISD::SETEQ: Opc = X86::SETEr; break;
1525 case ISD::SETGT: Opc = X86::SETGr; break;
1526 case ISD::SETGE: Opc = X86::SETGEr; break;
1527 case ISD::SETLT: Opc = X86::SETLr; break;
1528 case ISD::SETLE: Opc = X86::SETLEr; break;
1529 case ISD::SETNE: Opc = X86::SETNEr; break;
1530 case ISD::SETULT: Opc = X86::SETBr; break;
1531 case ISD::SETUGT: Opc = X86::SETAr; break;
1532 case ISD::SETULE: Opc = X86::SETBEr; break;
1533 case ISD::SETUGE: Opc = X86::SETAEr; break;
1534 }
1535 } else {
1536 // On a floating point condition, the flags are set as follows:
1537 // ZF PF CF op
1538 // 0 | 0 | 0 | X > Y
1539 // 0 | 0 | 1 | X < Y
1540 // 1 | 0 | 0 | X == Y
1541 // 1 | 1 | 1 | unordered
1542 //
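    // Conditions that cannot be tested with a single setcc are assembled below
    // from a parity check plus a base condition; e.g. SETOEQ ("ordered and
    // equal", PF = 0 and ZF = 1) is built as SETNP & SETE.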
1543 switch (SetCCOpcode) {
1544 default: assert(0 && "Invalid FP setcc!");
1545 case ISD::SETUEQ:
1546 case ISD::SETEQ:
1547 Opc = X86::SETEr; // True if ZF = 1
1548 break;
1549 case ISD::SETOGT:
1550 case ISD::SETGT:
1551 Opc = X86::SETAr; // True if CF = 0 and ZF = 0
1552 break;
1553 case ISD::SETOGE:
1554 case ISD::SETGE:
1555 Opc = X86::SETAEr; // True if CF = 0
1556 break;
1557 case ISD::SETULT:
1558 case ISD::SETLT:
1559 Opc = X86::SETBr; // True if CF = 1
1560 break;
1561 case ISD::SETULE:
1562 case ISD::SETLE:
1563 Opc = X86::SETBEr; // True if CF = 1 or ZF = 1
1564 break;
1565 case ISD::SETONE:
1566 case ISD::SETNE:
1567 Opc = X86::SETNEr; // True if ZF = 0
1568 break;
1569 case ISD::SETUO:
1570 Opc = X86::SETPr; // True if PF = 1
1571 break;
1572 case ISD::SETO:
1573 Opc = X86::SETNPr; // True if PF = 0
1574 break;
1575 case ISD::SETOEQ: // !PF & ZF
1576 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETEr, X86::AND8rr, DestReg);
1577 return;
1578 case ISD::SETOLT: // !PF & CF
1579 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBr, X86::AND8rr, DestReg);
1580 return;
1581 case ISD::SETOLE: // !PF & (CF || ZF)
1582 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBEr, X86::AND8rr, DestReg);
1583 return;
1584 case ISD::SETUGT: // PF | (!ZF & !CF)
1585 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAr, X86::OR8rr, DestReg);
1586 return;
1587 case ISD::SETUGE: // PF | !CF
1588 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAEr, X86::OR8rr, DestReg);
1589 return;
1590 case ISD::SETUNE: // PF | !ZF
1591 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETNEr, X86::OR8rr, DestReg);
1592 return;
1593 }
1594 }
1595 BuildMI(BB, Opc, 0, DestReg);
1596}
1597
1598
1599/// EmitBranchCC - Emit code into BB that arranges for control to transfer to
1600/// the Dest block if the Cond condition is true. If we cannot fold this
1601/// condition into the branch, return true.
1602///
Chris Lattner6c07aee2005-01-11 04:06:27 +00001603bool ISel::EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain,
1604 SDOperand Cond) {
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001605 // FIXME: Evaluate whether it would be good to emit code like (X < Y) | (A >
1606 // B) using two conditional branches instead of one condbr, two setcc's, and
1607 // an or.
1608 if ((Cond.getOpcode() == ISD::OR ||
1609 Cond.getOpcode() == ISD::AND) && Cond.Val->hasOneUse()) {
1610    // AND and OR set the flags for us, so there is no need to emit a TEST of
1611    // the result. It is only safe to do this if there is only a single use of
1612    // the AND/OR; otherwise we don't know it will be emitted here.
Chris Lattner6c07aee2005-01-11 04:06:27 +00001613 Select(Chain);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001614 SelectExpr(Cond);
1615 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
1616 return false;
1617 }
1618
1619 // Codegen br not C -> JE.
1620 if (Cond.getOpcode() == ISD::XOR)
1621 if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(Cond.Val->getOperand(1)))
1622 if (NC->isAllOnesValue()) {
Chris Lattner6c07aee2005-01-11 04:06:27 +00001623 unsigned CondR;
1624 if (getRegPressure(Chain) > getRegPressure(Cond)) {
1625 Select(Chain);
1626 CondR = SelectExpr(Cond.Val->getOperand(0));
1627 } else {
1628 CondR = SelectExpr(Cond.Val->getOperand(0));
1629 Select(Chain);
1630 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001631 BuildMI(BB, X86::TEST8rr, 2).addReg(CondR).addReg(CondR);
1632 BuildMI(BB, X86::JE, 1).addMBB(Dest);
1633 return false;
1634 }
1635
1636 SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Cond);
1637 if (SetCC == 0)
1638 return true; // Can only handle simple setcc's so far.
1639
1640 unsigned Opc;
1641
1642 // Handle integer conditions first.
1643 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
1644 switch (SetCC->getCondition()) {
1645 default: assert(0 && "Illegal integer SetCC!");
1646 case ISD::SETEQ: Opc = X86::JE; break;
1647 case ISD::SETGT: Opc = X86::JG; break;
1648 case ISD::SETGE: Opc = X86::JGE; break;
1649 case ISD::SETLT: Opc = X86::JL; break;
1650 case ISD::SETLE: Opc = X86::JLE; break;
1651 case ISD::SETNE: Opc = X86::JNE; break;
1652 case ISD::SETULT: Opc = X86::JB; break;
1653 case ISD::SETUGT: Opc = X86::JA; break;
1654 case ISD::SETULE: Opc = X86::JBE; break;
1655 case ISD::SETUGE: Opc = X86::JAE; break;
1656 }
Chris Lattner6c07aee2005-01-11 04:06:27 +00001657 Select(Chain);
Chris Lattnercb1aa8d2005-01-17 01:34:14 +00001658 EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1), SetCC->hasOneUse());
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001659 BuildMI(BB, Opc, 1).addMBB(Dest);
1660 return false;
1661 }
1662
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001663 unsigned Opc2 = 0; // Second branch if needed.
1664
1665 // On a floating point condition, the flags are set as follows:
1666 // ZF PF CF op
1667 // 0 | 0 | 0 | X > Y
1668 // 0 | 0 | 1 | X < Y
1669 // 1 | 0 | 0 | X == Y
1670 // 1 | 1 | 1 | unordered
1671 //
1672 switch (SetCC->getCondition()) {
1673 default: assert(0 && "Invalid FP setcc!");
1674 case ISD::SETUEQ:
1675 case ISD::SETEQ: Opc = X86::JE; break; // True if ZF = 1
1676 case ISD::SETOGT:
1677 case ISD::SETGT: Opc = X86::JA; break; // True if CF = 0 and ZF = 0
1678 case ISD::SETOGE:
1679 case ISD::SETGE: Opc = X86::JAE; break; // True if CF = 0
1680 case ISD::SETULT:
1681 case ISD::SETLT: Opc = X86::JB; break; // True if CF = 1
1682 case ISD::SETULE:
1683 case ISD::SETLE: Opc = X86::JBE; break; // True if CF = 1 or ZF = 1
1684 case ISD::SETONE:
1685 case ISD::SETNE: Opc = X86::JNE; break; // True if ZF = 0
1686 case ISD::SETUO: Opc = X86::JP; break; // True if PF = 1
1687 case ISD::SETO: Opc = X86::JNP; break; // True if PF = 0
1688 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
1689 Opc = X86::JA; // ZF = 0 & CF = 0
1690 Opc2 = X86::JP; // PF = 1
1691 break;
1692 case ISD::SETUGE: // PF = 1 | CF = 0
1693 Opc = X86::JAE; // CF = 0
1694 Opc2 = X86::JP; // PF = 1
1695 break;
1696 case ISD::SETUNE: // PF = 1 | ZF = 0
1697 Opc = X86::JNE; // ZF = 0
1698 Opc2 = X86::JP; // PF = 1
1699 break;
1700 case ISD::SETOEQ: // PF = 0 & ZF = 1
1701 //X86::JNP, X86::JE
1702 //X86::AND8rr
1703 return true; // FIXME: Emit more efficient code for this branch.
1704 case ISD::SETOLT: // PF = 0 & CF = 1
1705 //X86::JNP, X86::JB
1706 //X86::AND8rr
1707 return true; // FIXME: Emit more efficient code for this branch.
1708 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
1709 //X86::JNP, X86::JBE
1710 //X86::AND8rr
1711 return true; // FIXME: Emit more efficient code for this branch.
1712 }
1713
Chris Lattner6c07aee2005-01-11 04:06:27 +00001714 Select(Chain);
Chris Lattnercb1aa8d2005-01-17 01:34:14 +00001715 EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1), SetCC->hasOneUse());
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001716 BuildMI(BB, Opc, 1).addMBB(Dest);
1717 if (Opc2)
1718 BuildMI(BB, Opc2, 1).addMBB(Dest);
1719 return false;
1720}
1721
Chris Lattner24aad1b2005-01-10 22:10:13 +00001722/// EmitSelectCC - Emit code into BB that performs a select operation between
1723/// the two registers RTrue and RFalse, generating a result into RDest.
1725///
1726void ISel::EmitSelectCC(SDOperand Cond, MVT::ValueType SVT,
1727 unsigned RTrue, unsigned RFalse, unsigned RDest) {
1728 enum Condition {
1729 EQ, NE, LT, LE, GT, GE, B, BE, A, AE, P, NP,
1730 NOT_SET
1731 } CondCode = NOT_SET;
1732
1733 static const unsigned CMOVTAB16[] = {
1734 X86::CMOVE16rr, X86::CMOVNE16rr, X86::CMOVL16rr, X86::CMOVLE16rr,
1735 X86::CMOVG16rr, X86::CMOVGE16rr, X86::CMOVB16rr, X86::CMOVBE16rr,
Misha Brukman0e0a7a452005-04-21 23:38:14 +00001736 X86::CMOVA16rr, X86::CMOVAE16rr, X86::CMOVP16rr, X86::CMOVNP16rr,
Chris Lattner24aad1b2005-01-10 22:10:13 +00001737 };
1738 static const unsigned CMOVTAB32[] = {
1739 X86::CMOVE32rr, X86::CMOVNE32rr, X86::CMOVL32rr, X86::CMOVLE32rr,
1740 X86::CMOVG32rr, X86::CMOVGE32rr, X86::CMOVB32rr, X86::CMOVBE32rr,
Misha Brukman0e0a7a452005-04-21 23:38:14 +00001741 X86::CMOVA32rr, X86::CMOVAE32rr, X86::CMOVP32rr, X86::CMOVNP32rr,
Chris Lattner24aad1b2005-01-10 22:10:13 +00001742 };
1743 static const unsigned CMOVTABFP[] = {
1744 X86::FCMOVE , X86::FCMOVNE, /*missing*/0, /*missing*/0,
1745 /*missing*/0, /*missing*/0, X86::FCMOVB , X86::FCMOVBE,
1746 X86::FCMOVA , X86::FCMOVAE, X86::FCMOVP , X86::FCMOVNP
1747 };
Nate Begeman16b04f32005-07-15 00:38:55 +00001748 static const int SSE_CMOVTAB[] = {
Nate Begemanf63be7d2005-07-06 18:59:04 +00001749 0 /* CMPEQSS */, 4 /* CMPNEQSS */, 1 /* CMPLTSS */, 2 /* CMPLESS */,
Nate Begeman16b04f32005-07-15 00:38:55 +00001750 1 /* CMPLTSS */, 2 /* CMPLESS */, /*missing*/0, /*missing*/0,
Nate Begemanf63be7d2005-07-06 18:59:04 +00001751 /*missing*/0, /*missing*/0, /*missing*/0, /*missing*/0
1752 };
Chris Lattner24aad1b2005-01-10 22:10:13 +00001753
1754 if (SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Cond)) {
1755 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
1756 switch (SetCC->getCondition()) {
1757 default: assert(0 && "Unknown integer comparison!");
1758 case ISD::SETEQ: CondCode = EQ; break;
1759 case ISD::SETGT: CondCode = GT; break;
1760 case ISD::SETGE: CondCode = GE; break;
1761 case ISD::SETLT: CondCode = LT; break;
1762 case ISD::SETLE: CondCode = LE; break;
1763 case ISD::SETNE: CondCode = NE; break;
1764 case ISD::SETULT: CondCode = B; break;
1765 case ISD::SETUGT: CondCode = A; break;
1766 case ISD::SETULE: CondCode = BE; break;
1767 case ISD::SETUGE: CondCode = AE; break;
1768 }
Nate Begemanf63be7d2005-07-06 18:59:04 +00001769 } else if (X86ScalarSSE) {
1770 switch (SetCC->getCondition()) {
1771 default: assert(0 && "Unknown scalar fp comparison!");
1772 case ISD::SETEQ: CondCode = EQ; break;
1773 case ISD::SETNE: CondCode = NE; break;
1774 case ISD::SETULT:
1775 case ISD::SETLT: CondCode = LT; break;
1776 case ISD::SETULE:
1777 case ISD::SETLE: CondCode = LE; break;
1778 case ISD::SETUGT:
1779 case ISD::SETGT: CondCode = GT; break;
1780 case ISD::SETUGE:
1781 case ISD::SETGE: CondCode = GE; break;
1782 }
Chris Lattner24aad1b2005-01-10 22:10:13 +00001783 } else {
1784 // On a floating point condition, the flags are set as follows:
1785 // ZF PF CF op
1786 // 0 | 0 | 0 | X > Y
1787 // 0 | 0 | 1 | X < Y
1788 // 1 | 0 | 0 | X == Y
1789 // 1 | 1 | 1 | unordered
1790 //
1791 switch (SetCC->getCondition()) {
1792 default: assert(0 && "Unknown FP comparison!");
1793 case ISD::SETUEQ:
1794 case ISD::SETEQ: CondCode = EQ; break; // True if ZF = 1
1795 case ISD::SETOGT:
1796 case ISD::SETGT: CondCode = A; break; // True if CF = 0 and ZF = 0
1797 case ISD::SETOGE:
1798 case ISD::SETGE: CondCode = AE; break; // True if CF = 0
1799 case ISD::SETULT:
1800 case ISD::SETLT: CondCode = B; break; // True if CF = 1
1801 case ISD::SETULE:
1802 case ISD::SETLE: CondCode = BE; break; // True if CF = 1 or ZF = 1
1803 case ISD::SETONE:
1804 case ISD::SETNE: CondCode = NE; break; // True if ZF = 0
1805 case ISD::SETUO: CondCode = P; break; // True if PF = 1
1806 case ISD::SETO: CondCode = NP; break; // True if PF = 0
1807 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
1808 case ISD::SETUGE: // PF = 1 | CF = 0
1809 case ISD::SETUNE: // PF = 1 | ZF = 0
1810 case ISD::SETOEQ: // PF = 0 & ZF = 1
1811 case ISD::SETOLT: // PF = 0 & CF = 1
1812 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
1813 // We cannot emit this comparison as a single cmov.
1814 break;
1815 }
1816 }
1817 }
1818
Nate Begemanf63be7d2005-07-06 18:59:04 +00001819  // There's no SSE equivalent of FCMOVE.  In some cases we can fake it up; in
1820  // others we will have to do the PowerPC thing and generate an MBB for the
1821  // true and false values and select between them with a PHI.
Jeff Cohen00b168892005-07-27 06:12:32 +00001822 if (X86ScalarSSE && (SVT == MVT::f32 || SVT == MVT::f64)) {
Nate Begeman16b04f32005-07-15 00:38:55 +00001823 if (0 && CondCode != NOT_SET) {
1824 // FIXME: check for min and max
Nate Begemanf63be7d2005-07-06 18:59:04 +00001825 } else {
Nate Begeman16b04f32005-07-15 00:38:55 +00001826 // FIXME: emit a direct compare and branch rather than setting a cond reg
1827 // and testing it.
Nate Begemanf63be7d2005-07-06 18:59:04 +00001828 unsigned CondReg = SelectExpr(Cond);
1829 BuildMI(BB, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
1830
1831 // Create an iterator with which to insert the MBB for copying the false
1832 // value and the MBB to hold the PHI instruction for this SetCC.
1833 MachineBasicBlock *thisMBB = BB;
1834 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1835 ilist<MachineBasicBlock>::iterator It = BB;
1836 ++It;
1837
1838 // thisMBB:
1839 // ...
1840 // TrueVal = ...
1841 // cmpTY ccX, r1, r2
1842 // bCC sinkMBB
1843 // fallthrough --> copy0MBB
1844 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
1845 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
1846 BuildMI(BB, X86::JNE, 1).addMBB(sinkMBB);
1847 MachineFunction *F = BB->getParent();
1848 F->getBasicBlockList().insert(It, copy0MBB);
1849 F->getBasicBlockList().insert(It, sinkMBB);
1850 // Update machine-CFG edges
1851 BB->addSuccessor(copy0MBB);
1852 BB->addSuccessor(sinkMBB);
1853
1854 // copy0MBB:
1855 // %FalseValue = ...
1856 // # fallthrough to sinkMBB
1857 BB = copy0MBB;
1858 // Update machine-CFG edges
1859 BB->addSuccessor(sinkMBB);
1860
1861 // sinkMBB:
1862 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1863 // ...
1864 BB = sinkMBB;
1865 BuildMI(BB, X86::PHI, 4, RDest).addReg(RFalse)
1866 .addMBB(copy0MBB).addReg(RTrue).addMBB(thisMBB);
1867 }
1868 return;
1869 }
1870
Chris Lattner24aad1b2005-01-10 22:10:13 +00001871 unsigned Opc = 0;
1872 if (CondCode != NOT_SET) {
1873 switch (SVT) {
1874 default: assert(0 && "Cannot select this type!");
1875 case MVT::i16: Opc = CMOVTAB16[CondCode]; break;
1876 case MVT::i32: Opc = CMOVTAB32[CondCode]; break;
Chris Lattneref7ba072005-01-11 03:50:45 +00001877 case MVT::f64: Opc = CMOVTABFP[CondCode]; break;
Chris Lattner24aad1b2005-01-10 22:10:13 +00001878 }
1879 }
Jeff Cohen00b168892005-07-27 06:12:32 +00001880
Chris Lattner24aad1b2005-01-10 22:10:13 +00001881 // Finally, if we weren't able to fold this, just emit the condition and test
1882 // it.
1883 if (CondCode == NOT_SET || Opc == 0) {
1884 // Get the condition into the zero flag.
1885 unsigned CondReg = SelectExpr(Cond);
1886 BuildMI(BB, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
1887
1888 switch (SVT) {
1889 default: assert(0 && "Cannot select this type!");
1890 case MVT::i16: Opc = X86::CMOVE16rr; break;
1891 case MVT::i32: Opc = X86::CMOVE32rr; break;
Chris Lattneref7ba072005-01-11 03:50:45 +00001892 case MVT::f64: Opc = X86::FCMOVE; break;
Chris Lattner24aad1b2005-01-10 22:10:13 +00001893 }
1894 } else {
1895 // FIXME: CMP R, 0 -> TEST R, R
Chris Lattnercb1aa8d2005-01-17 01:34:14 +00001896 EmitCMP(Cond.getOperand(0), Cond.getOperand(1), Cond.Val->hasOneUse());
Chris Lattnera3aa2e22005-01-11 03:37:59 +00001897 std::swap(RTrue, RFalse);
Chris Lattner24aad1b2005-01-10 22:10:13 +00001898 }
1899 BuildMI(BB, Opc, 2, RDest).addReg(RTrue).addReg(RFalse);
1900}
1901
Chris Lattnercb1aa8d2005-01-17 01:34:14 +00001902void ISel::EmitCMP(SDOperand LHS, SDOperand RHS, bool HasOneUse) {
Chris Lattner11333092005-01-11 03:11:44 +00001903 unsigned Opc;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001904 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) {
1905 Opc = 0;
Chris Lattner4ff348b2005-01-17 06:26:58 +00001906 if (HasOneUse && isFoldableLoad(LHS, RHS)) {
Chris Lattneref6806c2005-01-12 02:02:48 +00001907 switch (RHS.getValueType()) {
1908 default: break;
1909 case MVT::i1:
1910 case MVT::i8: Opc = X86::CMP8mi; break;
1911 case MVT::i16: Opc = X86::CMP16mi; break;
1912 case MVT::i32: Opc = X86::CMP32mi; break;
1913 }
1914 if (Opc) {
1915 X86AddressMode AM;
1916 EmitFoldedLoad(LHS, AM);
1917 addFullAddress(BuildMI(BB, Opc, 5), AM).addImm(CN->getValue());
1918 return;
1919 }
1920 }
1921
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001922 switch (RHS.getValueType()) {
1923 default: break;
1924 case MVT::i1:
1925 case MVT::i8: Opc = X86::CMP8ri; break;
1926 case MVT::i16: Opc = X86::CMP16ri; break;
1927 case MVT::i32: Opc = X86::CMP32ri; break;
1928 }
1929 if (Opc) {
Chris Lattner11333092005-01-11 03:11:44 +00001930 unsigned Tmp1 = SelectExpr(LHS);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001931 BuildMI(BB, Opc, 2).addReg(Tmp1).addImm(CN->getValue());
1932 return;
1933 }
Chris Lattner7f2afac2005-01-14 22:37:41 +00001934 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(RHS)) {
Nate Begemanf63be7d2005-07-06 18:59:04 +00001935 if (!X86ScalarSSE && (CN->isExactlyValue(+0.0) ||
1936 CN->isExactlyValue(-0.0))) {
Chris Lattner7f2afac2005-01-14 22:37:41 +00001937 unsigned Reg = SelectExpr(LHS);
1938 BuildMI(BB, X86::FTST, 1).addReg(Reg);
1939 BuildMI(BB, X86::FNSTSW8r, 0);
1940 BuildMI(BB, X86::SAHF, 1);
Chris Lattner7805fa42005-03-17 16:29:26 +00001941 return;
Chris Lattner7f2afac2005-01-14 22:37:41 +00001942 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001943 }
1944
Chris Lattneref6806c2005-01-12 02:02:48 +00001945 Opc = 0;
Chris Lattner4ff348b2005-01-17 06:26:58 +00001946 if (HasOneUse && isFoldableLoad(LHS, RHS)) {
Chris Lattneref6806c2005-01-12 02:02:48 +00001947 switch (RHS.getValueType()) {
1948 default: break;
1949 case MVT::i1:
1950 case MVT::i8: Opc = X86::CMP8mr; break;
1951 case MVT::i16: Opc = X86::CMP16mr; break;
1952 case MVT::i32: Opc = X86::CMP32mr; break;
1953 }
1954 if (Opc) {
1955 X86AddressMode AM;
Chris Lattner636e79a2005-01-13 05:53:16 +00001956 EmitFoldedLoad(LHS, AM);
1957 unsigned Reg = SelectExpr(RHS);
Chris Lattneref6806c2005-01-12 02:02:48 +00001958 addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(Reg);
1959 return;
1960 }
1961 }
1962
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001963 switch (LHS.getValueType()) {
1964 default: assert(0 && "Cannot compare this value!");
1965 case MVT::i1:
1966 case MVT::i8: Opc = X86::CMP8rr; break;
1967 case MVT::i16: Opc = X86::CMP16rr; break;
1968 case MVT::i32: Opc = X86::CMP32rr; break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00001969 case MVT::f32: Opc = X86::UCOMISSrr; break;
1970 case MVT::f64: Opc = X86ScalarSSE ? X86::UCOMISDrr : X86::FUCOMIr; break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001971 }
Chris Lattner11333092005-01-11 03:11:44 +00001972 unsigned Tmp1, Tmp2;
1973 if (getRegPressure(LHS) > getRegPressure(RHS)) {
1974 Tmp1 = SelectExpr(LHS);
1975 Tmp2 = SelectExpr(RHS);
1976 } else {
1977 Tmp2 = SelectExpr(RHS);
1978 Tmp1 = SelectExpr(LHS);
1979 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00001980 BuildMI(BB, Opc, 2).addReg(Tmp1).addReg(Tmp2);
1981}
1982
Chris Lattnera5ade062005-01-11 21:19:59 +00001983/// isFoldableLoad - Return true if this is a load instruction that can safely
1984/// be folded into an operation that uses it.
Chris Lattner44129b52005-01-25 20:03:11 +00001985bool ISel::isFoldableLoad(SDOperand Op, SDOperand OtherOp, bool FloatPromoteOk){
1986 if (Op.getOpcode() == ISD::LOAD) {
1987 // FIXME: currently can't fold constant pool indexes.
1988 if (isa<ConstantPoolSDNode>(Op.getOperand(1)))
1989 return false;
1990 } else if (FloatPromoteOk && Op.getOpcode() == ISD::EXTLOAD &&
Chris Lattnerbce81ae2005-07-10 01:56:13 +00001991 cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::f32) {
Chris Lattner44129b52005-01-25 20:03:11 +00001992 // FIXME: currently can't fold constant pool indexes.
1993 if (isa<ConstantPoolSDNode>(Op.getOperand(1)))
1994 return false;
1995 } else {
Chris Lattnera5ade062005-01-11 21:19:59 +00001996 return false;
Chris Lattner44129b52005-01-25 20:03:11 +00001997 }
Chris Lattnera5ade062005-01-11 21:19:59 +00001998
1999 // If this load has already been emitted, we clearly can't fold it.
Chris Lattner636e79a2005-01-13 05:53:16 +00002000 assert(Op.ResNo == 0 && "Not a use of the value of the load?");
2001 if (ExprMap.count(Op.getValue(1))) return false;
2002 assert(!ExprMap.count(Op.getValue(0)) && "Value in map but not token chain?");
Chris Lattner4a108662005-01-18 03:51:59 +00002003 assert(!ExprMap.count(Op.getValue(1))&&"Token lowered but value not in map?");
Chris Lattnera5ade062005-01-11 21:19:59 +00002004
Chris Lattner4ff348b2005-01-17 06:26:58 +00002005 // If there is not just one use of its value, we cannot fold.
2006 if (!Op.Val->hasNUsesOfValue(1, 0)) return false;
2007
2008 // Finally, we cannot fold the load into the operation if this would induce a
2009 // cycle into the resultant dag. To check for this, see if OtherOp (the other
2010  // operand of the operation we are folding the load into) can possibly use the
2011 // chain node defined by the load.
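  // (If OtherOp consumed the load's output chain, folding the load into the
  // operation would make that chain be produced by the same node that consumes
  // OtherOp, i.e. a cycle.)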
2012 if (OtherOp.Val && !Op.Val->hasNUsesOfValue(0, 1)) { // Has uses of chain?
2013 std::set<SDNode*> Visited;
2014 if (NodeTransitivelyUsesValue(OtherOp, Op.getValue(1), Visited))
2015 return false;
2016 }
2017 return true;
Chris Lattnera5ade062005-01-11 21:19:59 +00002018}
2019
Chris Lattner4ff348b2005-01-17 06:26:58 +00002020
Chris Lattnera5ade062005-01-11 21:19:59 +00002021/// EmitFoldedLoad - Ensure that the arguments of the load are code generated,
2022/// and compute the address being loaded from, placing it in AM.
2023void ISel::EmitFoldedLoad(SDOperand Op, X86AddressMode &AM) {
2024 SDOperand Chain = Op.getOperand(0);
2025 SDOperand Address = Op.getOperand(1);
Chris Lattner98a8ba02005-01-18 01:06:26 +00002026
Chris Lattnera5ade062005-01-11 21:19:59 +00002027 if (getRegPressure(Chain) > getRegPressure(Address)) {
2028 Select(Chain);
2029 SelectAddress(Address, AM);
2030 } else {
2031 SelectAddress(Address, AM);
2032 Select(Chain);
2033 }
2034
2035 // The chain for this load is now lowered.
Chris Lattner636e79a2005-01-13 05:53:16 +00002036 assert(ExprMap.count(SDOperand(Op.Val, 1)) == 0 &&
2037 "Load emitted more than once?");
Chris Lattner4a108662005-01-18 03:51:59 +00002038 if (!ExprMap.insert(std::make_pair(Op.getValue(1), 1)).second)
Chris Lattner636e79a2005-01-13 05:53:16 +00002039 assert(0 && "Load emitted more than once!");
Chris Lattnera5ade062005-01-11 21:19:59 +00002040}
2041
Chris Lattner30ea1e92005-01-19 07:37:26 +00002042// EmitOrOpOp - Pattern match the expression (Op1|Op2), where we know that op1
2043// and op2 are i8/i16/i32 values with one use each (the or). If we can form a
2044// SHLD or SHRD, emit the instruction (generating the value into DestReg) and
2045// return true.
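// For reference: SHLD A, B, amt computes (A << amt) | (B >> (Size-amt)), and
// SHRD A, B, amt computes (A >> amt) | (B << (Size-amt)).  When both shifted
// values are the same register these degenerate to ROL/ROR, which is why the
// matching below distinguishes the ShrVal == ShlVal case.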
2046bool ISel::EmitOrOpOp(SDOperand Op1, SDOperand Op2, unsigned DestReg) {
Chris Lattner85716372005-01-19 06:18:43 +00002047 if (Op1.getOpcode() == ISD::SHL && Op2.getOpcode() == ISD::SRL) {
2048 // good!
2049 } else if (Op2.getOpcode() == ISD::SHL && Op1.getOpcode() == ISD::SRL) {
2050 std::swap(Op1, Op2); // Op1 is the SHL now.
2051 } else {
2052 return false; // No match
2053 }
2054
2055 SDOperand ShlVal = Op1.getOperand(0);
2056 SDOperand ShlAmt = Op1.getOperand(1);
2057 SDOperand ShrVal = Op2.getOperand(0);
2058 SDOperand ShrAmt = Op2.getOperand(1);
2059
Chris Lattner30ea1e92005-01-19 07:37:26 +00002060 unsigned RegSize = MVT::getSizeInBits(Op1.getValueType());
2061
Chris Lattner85716372005-01-19 06:18:43 +00002062 // Find out if ShrAmt = 32-ShlAmt or ShlAmt = 32-ShrAmt.
2063 if (ShlAmt.getOpcode() == ISD::SUB && ShlAmt.getOperand(1) == ShrAmt)
2064 if (ConstantSDNode *SubCST = dyn_cast<ConstantSDNode>(ShlAmt.getOperand(0)))
Chris Lattner4053b1e2005-01-19 08:07:05 +00002065 if (SubCST->getValue() == RegSize) {
2066 // (A >> ShrAmt) | (A << (32-ShrAmt)) ==> ROR A, ShrAmt
Chris Lattner85716372005-01-19 06:18:43 +00002067 // (A >> ShrAmt) | (B << (32-ShrAmt)) ==> SHRD A, B, ShrAmt
Chris Lattner4053b1e2005-01-19 08:07:05 +00002068 if (ShrVal == ShlVal) {
2069 unsigned Reg, ShAmt;
2070 if (getRegPressure(ShrVal) > getRegPressure(ShrAmt)) {
2071 Reg = SelectExpr(ShrVal);
2072 ShAmt = SelectExpr(ShrAmt);
2073 } else {
2074 ShAmt = SelectExpr(ShrAmt);
2075 Reg = SelectExpr(ShrVal);
2076 }
2077 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
2078 unsigned Opc = RegSize == 8 ? X86::ROR8rCL :
2079 (RegSize == 16 ? X86::ROR16rCL : X86::ROR32rCL);
2080 BuildMI(BB, Opc, 1, DestReg).addReg(Reg);
2081 return true;
2082 } else if (RegSize != 8) {
Chris Lattner85716372005-01-19 06:18:43 +00002083 unsigned AReg, BReg;
2084 if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
Chris Lattner85716372005-01-19 06:18:43 +00002085 BReg = SelectExpr(ShlVal);
Chris Lattnerc3c021b2005-01-19 17:24:34 +00002086 AReg = SelectExpr(ShrVal);
Chris Lattner85716372005-01-19 06:18:43 +00002087 } else {
Chris Lattner85716372005-01-19 06:18:43 +00002088 AReg = SelectExpr(ShrVal);
Chris Lattnerc3c021b2005-01-19 17:24:34 +00002089 BReg = SelectExpr(ShlVal);
Chris Lattner85716372005-01-19 06:18:43 +00002090 }
Chris Lattner4053b1e2005-01-19 08:07:05 +00002091 unsigned ShAmt = SelectExpr(ShrAmt);
2092 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
2093 unsigned Opc = RegSize == 16 ? X86::SHRD16rrCL : X86::SHRD32rrCL;
2094 BuildMI(BB, Opc, 2, DestReg).addReg(AReg).addReg(BReg);
Chris Lattner85716372005-01-19 06:18:43 +00002095 return true;
2096 }
2097 }
2098
Chris Lattner4053b1e2005-01-19 08:07:05 +00002099 if (ShrAmt.getOpcode() == ISD::SUB && ShrAmt.getOperand(1) == ShlAmt)
2100 if (ConstantSDNode *SubCST = dyn_cast<ConstantSDNode>(ShrAmt.getOperand(0)))
2101 if (SubCST->getValue() == RegSize) {
2102 // (A << ShlAmt) | (A >> (32-ShlAmt)) ==> ROL A, ShrAmt
2103 // (A << ShlAmt) | (B >> (32-ShlAmt)) ==> SHLD A, B, ShrAmt
2104 if (ShrVal == ShlVal) {
2105 unsigned Reg, ShAmt;
2106 if (getRegPressure(ShrVal) > getRegPressure(ShlAmt)) {
2107 Reg = SelectExpr(ShrVal);
2108 ShAmt = SelectExpr(ShlAmt);
2109 } else {
2110 ShAmt = SelectExpr(ShlAmt);
2111 Reg = SelectExpr(ShrVal);
2112 }
2113 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
2114 unsigned Opc = RegSize == 8 ? X86::ROL8rCL :
2115 (RegSize == 16 ? X86::ROL16rCL : X86::ROL32rCL);
2116 BuildMI(BB, Opc, 1, DestReg).addReg(Reg);
2117 return true;
2118 } else if (RegSize != 8) {
2119 unsigned AReg, BReg;
2120 if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
Chris Lattnerc3c021b2005-01-19 17:24:34 +00002121 AReg = SelectExpr(ShlVal);
2122 BReg = SelectExpr(ShrVal);
Chris Lattner4053b1e2005-01-19 08:07:05 +00002123 } else {
Chris Lattnerc3c021b2005-01-19 17:24:34 +00002124 BReg = SelectExpr(ShrVal);
2125 AReg = SelectExpr(ShlVal);
Chris Lattner4053b1e2005-01-19 08:07:05 +00002126 }
2127 unsigned ShAmt = SelectExpr(ShlAmt);
2128 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
2129 unsigned Opc = RegSize == 16 ? X86::SHLD16rrCL : X86::SHLD32rrCL;
2130 BuildMI(BB, Opc, 2, DestReg).addReg(AReg).addReg(BReg);
2131 return true;
2132 }
2133 }
Chris Lattner85716372005-01-19 06:18:43 +00002134
Chris Lattner4053b1e2005-01-19 08:07:05 +00002135 if (ConstantSDNode *ShrCst = dyn_cast<ConstantSDNode>(ShrAmt))
2136 if (ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(ShlAmt))
2137 if (ShrCst->getValue() < RegSize && ShlCst->getValue() < RegSize)
2138 if (ShrCst->getValue() == RegSize-ShlCst->getValue()) {
2139 // (A >> 5) | (A << 27) --> ROR A, 5
2140 // (A >> 5) | (B << 27) --> SHRD A, B, 5
2141 if (ShrVal == ShlVal) {
2142 unsigned Reg = SelectExpr(ShrVal);
2143 unsigned Opc = RegSize == 8 ? X86::ROR8ri :
2144 (RegSize == 16 ? X86::ROR16ri : X86::ROR32ri);
2145 BuildMI(BB, Opc, 2, DestReg).addReg(Reg).addImm(ShrCst->getValue());
2146 return true;
2147 } else if (RegSize != 8) {
2148 unsigned AReg, BReg;
2149 if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
Chris Lattner4053b1e2005-01-19 08:07:05 +00002150 BReg = SelectExpr(ShlVal);
Chris Lattnerc3c021b2005-01-19 17:24:34 +00002151 AReg = SelectExpr(ShrVal);
Chris Lattner4053b1e2005-01-19 08:07:05 +00002152 } else {
Chris Lattner4053b1e2005-01-19 08:07:05 +00002153 AReg = SelectExpr(ShrVal);
Chris Lattnerc3c021b2005-01-19 17:24:34 +00002154 BReg = SelectExpr(ShlVal);
Chris Lattner4053b1e2005-01-19 08:07:05 +00002155 }
2156 unsigned Opc = RegSize == 16 ? X86::SHRD16rri8 : X86::SHRD32rri8;
2157 BuildMI(BB, Opc, 3, DestReg).addReg(AReg).addReg(BReg)
2158 .addImm(ShrCst->getValue());
2159 return true;
2160 }
2161 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002162
Chris Lattner85716372005-01-19 06:18:43 +00002163 return false;
2164}
2165
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002166unsigned ISel::SelectExpr(SDOperand N) {
2167 unsigned Result;
2168 unsigned Tmp1, Tmp2, Tmp3;
2169 unsigned Opc = 0;
Chris Lattner5188ad72005-01-08 19:28:19 +00002170 SDNode *Node = N.Val;
Chris Lattnera5ade062005-01-11 21:19:59 +00002171 SDOperand Op0, Op1;
Chris Lattner5188ad72005-01-08 19:28:19 +00002172
Chris Lattner7f2afac2005-01-14 22:37:41 +00002173 if (Node->getOpcode() == ISD::CopyFromReg) {
Chris Lattnerc6f41812005-05-12 23:06:28 +00002174 if (MRegisterInfo::isVirtualRegister(cast<RegSDNode>(Node)->getReg()) ||
2175 cast<RegSDNode>(Node)->getReg() == X86::ESP) {
2176 // Just use the specified register as our input.
2177 return cast<RegSDNode>(Node)->getReg();
2178 }
Chris Lattner7f2afac2005-01-14 22:37:41 +00002179 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002180
Chris Lattnera5ade062005-01-11 21:19:59 +00002181 unsigned &Reg = ExprMap[N];
2182 if (Reg) return Reg;
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002183
Chris Lattnerb38a7492005-04-02 04:01:14 +00002184 switch (N.getOpcode()) {
2185 default:
Chris Lattnera5ade062005-01-11 21:19:59 +00002186 Reg = Result = (N.getValueType() != MVT::Other) ?
Chris Lattnerb38a7492005-04-02 04:01:14 +00002187 MakeReg(N.getValueType()) : 1;
2188 break;
Chris Lattner239738a2005-05-14 08:48:15 +00002189 case X86ISD::TAILCALL:
2190 case X86ISD::CALL:
Chris Lattnera5ade062005-01-11 21:19:59 +00002191 // If this is a call instruction, make sure to prepare ALL of the result
2192 // values as well as the chain.
Chris Lattner239738a2005-05-14 08:48:15 +00002193 ExprMap[N.getValue(0)] = 1;
2194 if (Node->getNumValues() > 1) {
2195 Result = MakeReg(Node->getValueType(1));
2196 ExprMap[N.getValue(1)] = Result;
2197 for (unsigned i = 2, e = Node->getNumValues(); i != e; ++i)
Chris Lattnera5ade062005-01-11 21:19:59 +00002198 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
Chris Lattner239738a2005-05-14 08:48:15 +00002199 } else {
2200 Result = 1;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002201 }
Chris Lattnerb38a7492005-04-02 04:01:14 +00002202 break;
2203 case ISD::ADD_PARTS:
2204 case ISD::SUB_PARTS:
2205 case ISD::SHL_PARTS:
2206 case ISD::SRL_PARTS:
2207 case ISD::SRA_PARTS:
2208 Result = MakeReg(Node->getValueType(0));
2209 ExprMap[N.getValue(0)] = Result;
2210 for (unsigned i = 1, e = N.Val->getNumValues(); i != e; ++i)
2211 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
2212 break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002213 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002214
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002215 switch (N.getOpcode()) {
2216 default:
Chris Lattner5188ad72005-01-08 19:28:19 +00002217 Node->dump();
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002218 assert(0 && "Node not handled!\n");
Nate Begemanf63be7d2005-07-06 18:59:04 +00002219 case ISD::FP_EXTEND:
Jeff Cohen00b168892005-07-27 06:12:32 +00002220 assert(X86ScalarSSE && "Scalar SSE FP must be enabled to use f32");
Nate Begemanf63be7d2005-07-06 18:59:04 +00002221 Tmp1 = SelectExpr(N.getOperand(0));
2222 BuildMI(BB, X86::CVTSS2SDrr, 1, Result).addReg(Tmp1);
2223 return Result;
Nate Begeman16b04f32005-07-15 00:38:55 +00002224 case ISD::FP_ROUND:
Jeff Cohen00b168892005-07-27 06:12:32 +00002225 assert(X86ScalarSSE && "Scalar SSE FP must be enabled to use f32");
Nate Begeman16b04f32005-07-15 00:38:55 +00002226 Tmp1 = SelectExpr(N.getOperand(0));
2227 BuildMI(BB, X86::CVTSD2SSrr, 1, Result).addReg(Tmp1);
2228 return Result;
Chris Lattnerc6f41812005-05-12 23:06:28 +00002229 case ISD::CopyFromReg:
2230 Select(N.getOperand(0));
2231 if (Result == 1) {
2232 Reg = Result = ExprMap[N.getValue(0)] =
2233 MakeReg(N.getValue(0).getValueType());
2234 }
2235 switch (Node->getValueType(0)) {
2236 default: assert(0 && "Cannot CopyFromReg this!");
2237 case MVT::i1:
2238 case MVT::i8:
2239 BuildMI(BB, X86::MOV8rr, 1,
2240 Result).addReg(cast<RegSDNode>(Node)->getReg());
2241 return Result;
2242 case MVT::i16:
2243 BuildMI(BB, X86::MOV16rr, 1,
2244 Result).addReg(cast<RegSDNode>(Node)->getReg());
2245 return Result;
2246 case MVT::i32:
2247 BuildMI(BB, X86::MOV32rr, 1,
2248 Result).addReg(cast<RegSDNode>(Node)->getReg());
2249 return Result;
Jeff Cohen00b168892005-07-27 06:12:32 +00002250 }
Chris Lattnerc6f41812005-05-12 23:06:28 +00002251
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002252 case ISD::FrameIndex:
2253 Tmp1 = cast<FrameIndexSDNode>(N)->getIndex();
2254 addFrameReference(BuildMI(BB, X86::LEA32r, 4, Result), (int)Tmp1);
2255 return Result;
2256 case ISD::ConstantPool:
2257 Tmp1 = cast<ConstantPoolSDNode>(N)->getIndex();
2258 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 4, Result), Tmp1);
2259 return Result;
2260 case ISD::ConstantFP:
2261 ContainsFPCode = true;
2262 Tmp1 = Result; // Intermediate Register
2263 if (cast<ConstantFPSDNode>(N)->getValue() < 0.0 ||
2264 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
2265 Tmp1 = MakeReg(MVT::f64);
2266
2267 if (cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) ||
2268 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
2269 BuildMI(BB, X86::FLD0, 0, Tmp1);
2270 else if (cast<ConstantFPSDNode>(N)->isExactlyValue(+1.0) ||
2271 cast<ConstantFPSDNode>(N)->isExactlyValue(-1.0))
2272 BuildMI(BB, X86::FLD1, 0, Tmp1);
2273 else
2274 assert(0 && "Unexpected constant!");
2275 if (Tmp1 != Result)
2276 BuildMI(BB, X86::FCHS, 1, Result).addReg(Tmp1);
2277 return Result;
2278 case ISD::Constant:
2279 switch (N.getValueType()) {
2280 default: assert(0 && "Cannot use constants of this type!");
2281 case MVT::i1:
2282 case MVT::i8: Opc = X86::MOV8ri; break;
2283 case MVT::i16: Opc = X86::MOV16ri; break;
2284 case MVT::i32: Opc = X86::MOV32ri; break;
2285 }
2286 BuildMI(BB, Opc, 1,Result).addImm(cast<ConstantSDNode>(N)->getValue());
2287 return Result;
Chris Lattner7ce7eff2005-04-01 22:46:45 +00002288 case ISD::UNDEF:
2289 if (Node->getValueType(0) == MVT::f64) {
2290 // FIXME: SHOULD TEACH STACKIFIER ABOUT UNDEF VALUES!
2291 BuildMI(BB, X86::FLD0, 0, Result);
2292 } else {
2293 BuildMI(BB, X86::IMPLICIT_DEF, 0, Result);
2294 }
2295 return Result;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002296 case ISD::GlobalAddress: {
2297 GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
Nate Begemanfb5792f2005-07-12 01:41:54 +00002298 // For Darwin, external and weak symbols are indirect, so we want to load
2299 // the value at address GV, not the value of GV itself.
Jeff Cohen00b168892005-07-27 06:12:32 +00002300 if (Subtarget->getIndirectExternAndWeakGlobals() &&
Nate Begemanfb5792f2005-07-12 01:41:54 +00002301 (GV->hasWeakLinkage() || GV->isExternal())) {
2302 BuildMI(BB, X86::MOV32rm, 4, Result).addReg(0).addZImm(1).addReg(0)
2303 .addGlobalAddress(GV, false, 0);
2304 } else {
2305 BuildMI(BB, X86::MOV32ri, 1, Result).addGlobalAddress(GV);
2306 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002307 return Result;
2308 }
2309 case ISD::ExternalSymbol: {
2310 const char *Sym = cast<ExternalSymbolSDNode>(N)->getSymbol();
2311 BuildMI(BB, X86::MOV32ri, 1, Result).addExternalSymbol(Sym);
2312 return Result;
2313 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002314 case ISD::ZERO_EXTEND: {
2315 int DestIs16 = N.getValueType() == MVT::i16;
2316 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
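    // The Opc tables below are indexed by SrcIs16 + DestIs16*2:
    //   0 = i8->i32, 1 = i16->i32, 2 = i8->i16 (i16->i16 cannot occur here).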
Chris Lattner590d8002005-01-09 18:52:44 +00002317
2318 // FIXME: This hack is here for zero extension casts from bool to i8. This
2319 // would not be needed if bools were promoted by Legalize.
2320 if (N.getValueType() == MVT::i8) {
Chris Lattnerdbba22f2005-01-11 23:33:00 +00002321 Tmp1 = SelectExpr(N.getOperand(0));
Chris Lattner590d8002005-01-09 18:52:44 +00002322 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(Tmp1);
2323 return Result;
2324 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002325
Chris Lattner4ff348b2005-01-17 06:26:58 +00002326 if (isFoldableLoad(N.getOperand(0), SDOperand())) {
Chris Lattnerdbba22f2005-01-11 23:33:00 +00002327 static const unsigned Opc[3] = {
2328 X86::MOVZX32rm8, X86::MOVZX32rm16, X86::MOVZX16rm8
2329 };
2330
2331 X86AddressMode AM;
2332 EmitFoldedLoad(N.getOperand(0), AM);
2333 addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002334
Chris Lattnerdbba22f2005-01-11 23:33:00 +00002335 return Result;
2336 }
2337
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002338 static const unsigned Opc[3] = {
2339 X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOVZX16rr8
2340 };
Chris Lattnerdbba22f2005-01-11 23:33:00 +00002341 Tmp1 = SelectExpr(N.getOperand(0));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002342 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
2343 return Result;
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002344 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002345 case ISD::SIGN_EXTEND: {
2346 int DestIs16 = N.getValueType() == MVT::i16;
2347 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
2348
Chris Lattner590d8002005-01-09 18:52:44 +00002349 // FIXME: Legalize should promote bools to i8!
2350 assert(N.getOperand(0).getValueType() != MVT::i1 &&
2351 "Sign extend from bool not implemented!");
2352
Chris Lattner4ff348b2005-01-17 06:26:58 +00002353 if (isFoldableLoad(N.getOperand(0), SDOperand())) {
Chris Lattnerdbba22f2005-01-11 23:33:00 +00002354 static const unsigned Opc[3] = {
2355 X86::MOVSX32rm8, X86::MOVSX32rm16, X86::MOVSX16rm8
2356 };
2357
2358 X86AddressMode AM;
2359 EmitFoldedLoad(N.getOperand(0), AM);
2360 addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM);
2361 return Result;
2362 }
2363
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002364 static const unsigned Opc[3] = {
2365 X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOVSX16rr8
2366 };
2367 Tmp1 = SelectExpr(N.getOperand(0));
2368 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
2369 return Result;
2370 }
2371 case ISD::TRUNCATE:
Chris Lattnerafce4302005-01-12 02:19:06 +00002372 // Fold TRUNCATE (LOAD P) into a smaller load from P.
Chris Lattner477c9312005-01-18 20:05:56 +00002373 // FIXME: This should be performed by the DAGCombiner.
Chris Lattner4ff348b2005-01-17 06:26:58 +00002374 if (isFoldableLoad(N.getOperand(0), SDOperand())) {
Chris Lattnerafce4302005-01-12 02:19:06 +00002375 switch (N.getValueType()) {
2376 default: assert(0 && "Unknown truncate!");
2377 case MVT::i1:
2378 case MVT::i8: Opc = X86::MOV8rm; break;
2379 case MVT::i16: Opc = X86::MOV16rm; break;
2380 }
2381 X86AddressMode AM;
2382 EmitFoldedLoad(N.getOperand(0), AM);
2383 addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
2384 return Result;
2385 }
2386
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002387 // Handle cast of LARGER int to SMALLER int using a move to EAX followed by
2388 // a move out of AX or AL.
2389 switch (N.getOperand(0).getValueType()) {
2390 default: assert(0 && "Unknown truncate!");
2391 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
2392 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
2393 case MVT::i32: Tmp2 = X86::EAX; Opc = X86::MOV32rr; break;
2394 }
2395 Tmp1 = SelectExpr(N.getOperand(0));
2396 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
2397
2398 switch (N.getValueType()) {
2399 default: assert(0 && "Unknown truncate!");
2400 case MVT::i1:
2401 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
2402 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
2403 }
2404 BuildMI(BB, Opc, 1, Result).addReg(Tmp2);
2405 return Result;
2406
Chris Lattnera28381c2005-07-16 00:28:20 +00002407 case ISD::SINT_TO_FP: {
Nate Begemanf63be7d2005-07-06 18:59:04 +00002408 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
2409 unsigned PromoteOpcode = 0;
2410
Nate Begeman5a8441e2005-07-16 02:02:34 +00002411    // We can handle any sint_to_fp with the direct SSE conversion instructions.
Nate Begemanf63be7d2005-07-06 18:59:04 +00002412 if (X86ScalarSSE) {
Nate Begeman5a8441e2005-07-16 02:02:34 +00002413 Opc = (N.getValueType() == MVT::f64) ? X86::CVTSI2SDrr : X86::CVTSI2SSrr;
Nate Begemanf63be7d2005-07-06 18:59:04 +00002414 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2415 return Result;
2416 }
Jeff Cohen00b168892005-07-27 06:12:32 +00002417
Chris Lattneref7ba072005-01-11 03:50:45 +00002418 ContainsFPCode = true;
Chris Lattner590d8002005-01-09 18:52:44 +00002419
Chris Lattner590d8002005-01-09 18:52:44 +00002420 // Spill the integer to memory and reload it from there.
Nate Begeman5a8441e2005-07-16 02:02:34 +00002421 MVT::ValueType SrcTy = N.getOperand(0).getValueType();
Chris Lattner590d8002005-01-09 18:52:44 +00002422 unsigned Size = MVT::getSizeInBits(SrcTy)/8;
2423 MachineFunction *F = BB->getParent();
2424 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
2425
2426 switch (SrcTy) {
Chris Lattner590d8002005-01-09 18:52:44 +00002427 case MVT::i32:
Chris Lattnera28381c2005-07-16 00:28:20 +00002428 addFrameReference(BuildMI(BB, X86::MOV32mr, 5), FrameIdx).addReg(Tmp1);
Chris Lattner590d8002005-01-09 18:52:44 +00002429 addFrameReference(BuildMI(BB, X86::FILD32m, 5, Result), FrameIdx);
2430 break;
2431 case MVT::i16:
Chris Lattnera28381c2005-07-16 00:28:20 +00002432 addFrameReference(BuildMI(BB, X86::MOV16mr, 5), FrameIdx).addReg(Tmp1);
Chris Lattner590d8002005-01-09 18:52:44 +00002433 addFrameReference(BuildMI(BB, X86::FILD16m, 5, Result), FrameIdx);
2434 break;
2435 default: break; // No promotion required.
2436 }
Chris Lattnera28381c2005-07-16 00:28:20 +00002437 return Result;
Chris Lattner590d8002005-01-09 18:52:44 +00002438 }
2439 case ISD::FP_TO_SINT:
2440 case ISD::FP_TO_UINT: {
2441 // FIXME: Most of this grunt work should be done by legalize!
2442 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
2443
Nate Begemanf63be7d2005-07-06 18:59:04 +00002444 // If the target supports SSE2 and is performing FP operations in SSE regs
2445    // instead of the FP stack, then we can use the efficient truncating
2446    // CVTTSS2SI and CVTTSD2SI instructions.
2447 if (ISD::FP_TO_SINT == N.getOpcode() && X86ScalarSSE) {
2448 if (MVT::f32 == N.getOperand(0).getValueType()) {
Nate Begeman16b04f32005-07-15 00:38:55 +00002449 BuildMI(BB, X86::CVTTSS2SIrr, 1, Result).addReg(Tmp1);
Nate Begemanf63be7d2005-07-06 18:59:04 +00002450 } else if (MVT::f64 == N.getOperand(0).getValueType()) {
Nate Begeman16b04f32005-07-15 00:38:55 +00002451 BuildMI(BB, X86::CVTTSD2SIrr, 1, Result).addReg(Tmp1);
Nate Begemanf63be7d2005-07-06 18:59:04 +00002452 } else {
2453 assert(0 && "Not an f32 or f64?");
2454 abort();
2455 }
2456 return Result;
Jeff Cohen00b168892005-07-27 06:12:32 +00002457 }
Nate Begemanf63be7d2005-07-06 18:59:04 +00002458
Chris Lattner590d8002005-01-09 18:52:44 +00002459 // Change the floating point control register to use "round towards zero"
2460 // mode when truncating to an integer value.
2461 //
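    // (The x87 rounding-control field is bits 10-11 of the control word; the
    // MOV8mi of 12 (0x0C) into the high byte below sets RC = 11b, i.e. round
    // toward zero / truncate.)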
2462 MachineFunction *F = BB->getParent();
2463 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
2464 addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
2465
2466 // Load the old value of the high byte of the control word...
2467 unsigned HighPartOfCW = MakeReg(MVT::i8);
2468 addFrameReference(BuildMI(BB, X86::MOV8rm, 4, HighPartOfCW),
2469 CWFrameIdx, 1);
2470
2471 // Set the high part to be round to zero...
2472 addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
2473 CWFrameIdx, 1).addImm(12);
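    // (The immediate 12 is 0x0C: bits 10-11 of the x87 control word form the
    // rounding-control field, and 11b selects round-toward-zero, which is the
    // truncation behavior the FIST stores below rely on.)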
2474
2475 // Reload the modified control word now...
2476 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002477
Chris Lattner590d8002005-01-09 18:52:44 +00002478 // Restore the memory image of control word to original value
2479 addFrameReference(BuildMI(BB, X86::MOV8mr, 5),
2480 CWFrameIdx, 1).addReg(HighPartOfCW);
2481
2482 // We don't have the facilities for directly storing byte sized data to
2483 // memory. Promote it to 16 bits. We also must promote unsigned values to
2484 // larger classes because we only have signed FP stores.
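    // For example, FP_TO_UINT of f64 to i32 is stored through FISTP64m so that
    // results in [2^31, 2^32), which would overflow a signed 32-bit store, are
    // still produced correctly; only the low 32 bits are reloaded below.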
2485 MVT::ValueType StoreClass = Node->getValueType(0);
2486 if (StoreClass == MVT::i8 || Node->getOpcode() == ISD::FP_TO_UINT)
2487 switch (StoreClass) {
Chris Lattner2afa1912005-05-09 05:33:18 +00002488 case MVT::i1:
Chris Lattner590d8002005-01-09 18:52:44 +00002489 case MVT::i8: StoreClass = MVT::i16; break;
2490 case MVT::i16: StoreClass = MVT::i32; break;
2491 case MVT::i32: StoreClass = MVT::i64; break;
Chris Lattner590d8002005-01-09 18:52:44 +00002492 default: assert(0 && "Unknown store class!");
2493 }
2494
2495 // Spill the integer to memory and reload it from there.
2496 unsigned Size = MVT::getSizeInBits(StoreClass)/8;
2497 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
2498
2499 switch (StoreClass) {
2500 default: assert(0 && "Unknown store class!");
2501 case MVT::i16:
2502 addFrameReference(BuildMI(BB, X86::FIST16m, 5), FrameIdx).addReg(Tmp1);
2503 break;
2504 case MVT::i32:
Chris Lattner25020852005-01-09 19:49:59 +00002505 addFrameReference(BuildMI(BB, X86::FIST32m, 5), FrameIdx).addReg(Tmp1);
Chris Lattner590d8002005-01-09 18:52:44 +00002506 break;
Chris Lattnera0dbf182005-05-09 18:37:02 +00002507 case MVT::i64:
2508 addFrameReference(BuildMI(BB, X86::FISTP64m, 5), FrameIdx).addReg(Tmp1);
Chris Lattner745d5382005-07-29 00:40:01 +00002509 break;
2510 }
Chris Lattner590d8002005-01-09 18:52:44 +00002511
2512 switch (Node->getValueType(0)) {
2513 default:
2514 assert(0 && "Unknown integer type!");
Chris Lattner590d8002005-01-09 18:52:44 +00002515 case MVT::i32:
2516 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Result), FrameIdx);
2517 break;
2518 case MVT::i16:
2519 addFrameReference(BuildMI(BB, X86::MOV16rm, 4, Result), FrameIdx);
2520 break;
2521 case MVT::i8:
Chris Lattner2afa1912005-05-09 05:33:18 +00002522 case MVT::i1:
Chris Lattner590d8002005-01-09 18:52:44 +00002523 addFrameReference(BuildMI(BB, X86::MOV8rm, 4, Result), FrameIdx);
2524 break;
2525 }
2526
2527 // Reload the original control word now.
2528 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
2529 return Result;
2530 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002531 case ISD::ADD:
Chris Lattnera5ade062005-01-11 21:19:59 +00002532 Op0 = N.getOperand(0);
2533 Op1 = N.getOperand(1);
2534
Chris Lattner44129b52005-01-25 20:03:11 +00002535 if (isFoldableLoad(Op0, Op1, true)) {
Chris Lattnera5ade062005-01-11 21:19:59 +00002536 std::swap(Op0, Op1);
Chris Lattner4ff348b2005-01-17 06:26:58 +00002537 goto FoldAdd;
2538 }
Chris Lattnera5ade062005-01-11 21:19:59 +00002539
Chris Lattner44129b52005-01-25 20:03:11 +00002540 if (isFoldableLoad(Op1, Op0, true)) {
Chris Lattner4ff348b2005-01-17 06:26:58 +00002541 FoldAdd:
Chris Lattnera5ade062005-01-11 21:19:59 +00002542 switch (N.getValueType()) {
2543 default: assert(0 && "Cannot add this type!");
2544 case MVT::i1:
2545 case MVT::i8: Opc = X86::ADD8rm; break;
2546 case MVT::i16: Opc = X86::ADD16rm; break;
2547 case MVT::i32: Opc = X86::ADD32rm; break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00002548 case MVT::f32: Opc = X86::ADDSSrm; break;
Chris Lattner44129b52005-01-25 20:03:11 +00002549 case MVT::f64:
2550 // For F64, handle promoted load operations (from F32) as well!
Nate Begemanf63be7d2005-07-06 18:59:04 +00002551 if (X86ScalarSSE) {
2552 assert(Op1.getOpcode() == ISD::LOAD && "SSE load not promoted");
2553 Opc = X86::ADDSDrm;
2554 } else {
2555 Opc = Op1.getOpcode() == ISD::LOAD ? X86::FADD64m : X86::FADD32m;
2556 }
Chris Lattner44129b52005-01-25 20:03:11 +00002557 break;
Chris Lattnera5ade062005-01-11 21:19:59 +00002558 }
2559 X86AddressMode AM;
Chris Lattner636e79a2005-01-13 05:53:16 +00002560 EmitFoldedLoad(Op1, AM);
2561 Tmp1 = SelectExpr(Op0);
Chris Lattnera5ade062005-01-11 21:19:59 +00002562 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
2563 return Result;
2564 }
2565
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002566 // See if we can codegen this as an LEA to fold operations together.
2567 if (N.getValueType() == MVT::i32) {
Chris Lattner883c86f2005-01-18 02:25:52 +00002568 ExprMap.erase(N);
Chris Lattner98a8ba02005-01-18 01:06:26 +00002569 X86ISelAddressMode AM;
Chris Lattner883c86f2005-01-18 02:25:52 +00002570 MatchAddress(N, AM);
2571 ExprMap[N] = Result;
2572
2573 // If this is not just an add, emit the LEA. For a simple add (like
2574 // reg+reg or reg+imm), we just emit an add. It might be a good idea to
2575 // leave this as LEA, then peephole it to 'ADD' after two address elim
2576 // happens.
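    // For instance, (add (add %A, (shl %B, 2)), 12) can usually be matched here
    // into a single "leal 12(%A,%B,4), %dst" instead of a chain of adds.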
2577 if (AM.Scale != 1 || AM.BaseType == X86ISelAddressMode::FrameIndexBase||
2578 AM.GV || (AM.Base.Reg.Val && AM.IndexReg.Val && AM.Disp)) {
2579 X86AddressMode XAM = SelectAddrExprs(AM);
2580 addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), XAM);
2581 return Result;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002582 }
2583 }
Chris Lattner11333092005-01-11 03:11:44 +00002584
Chris Lattnera5ade062005-01-11 21:19:59 +00002585 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002586 Opc = 0;
2587 if (CN->getValue() == 1) { // add X, 1 -> inc X
2588 switch (N.getValueType()) {
2589 default: assert(0 && "Cannot integer add this type!");
2590 case MVT::i8: Opc = X86::INC8r; break;
2591 case MVT::i16: Opc = X86::INC16r; break;
2592 case MVT::i32: Opc = X86::INC32r; break;
2593 }
2594 } else if (CN->isAllOnesValue()) { // add X, -1 -> dec X
2595 switch (N.getValueType()) {
2596 default: assert(0 && "Cannot integer add this type!");
2597 case MVT::i8: Opc = X86::DEC8r; break;
2598 case MVT::i16: Opc = X86::DEC16r; break;
2599 case MVT::i32: Opc = X86::DEC32r; break;
2600 }
2601 }
2602
2603 if (Opc) {
Chris Lattnera5ade062005-01-11 21:19:59 +00002604 Tmp1 = SelectExpr(Op0);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002605 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2606 return Result;
2607 }
2608
2609 switch (N.getValueType()) {
2610 default: assert(0 && "Cannot add this type!");
2611 case MVT::i8: Opc = X86::ADD8ri; break;
2612 case MVT::i16: Opc = X86::ADD16ri; break;
2613 case MVT::i32: Opc = X86::ADD32ri; break;
2614 }
2615 if (Opc) {
Chris Lattnera5ade062005-01-11 21:19:59 +00002616 Tmp1 = SelectExpr(Op0);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002617 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
2618 return Result;
2619 }
2620 }
2621
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002622 switch (N.getValueType()) {
2623 default: assert(0 && "Cannot add this type!");
2624 case MVT::i8: Opc = X86::ADD8rr; break;
2625 case MVT::i16: Opc = X86::ADD16rr; break;
2626 case MVT::i32: Opc = X86::ADD32rr; break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00002627 case MVT::f32: Opc = X86::ADDSSrr; break;
2628 case MVT::f64: Opc = X86ScalarSSE ? X86::ADDSDrr : X86::FpADD; break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002629 }
Chris Lattner11333092005-01-11 03:11:44 +00002630
Chris Lattnera5ade062005-01-11 21:19:59 +00002631 if (getRegPressure(Op0) > getRegPressure(Op1)) {
2632 Tmp1 = SelectExpr(Op0);
2633 Tmp2 = SelectExpr(Op1);
Chris Lattner11333092005-01-11 03:11:44 +00002634 } else {
Chris Lattnera5ade062005-01-11 21:19:59 +00002635 Tmp2 = SelectExpr(Op1);
2636 Tmp1 = SelectExpr(Op0);
Chris Lattner11333092005-01-11 03:11:44 +00002637 }
2638
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002639 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
2640 return Result;
Chris Lattnerb7edaa12005-04-02 05:30:17 +00002641
Nate Begemanf63be7d2005-07-06 18:59:04 +00002642 case ISD::FSQRT:
2643 Tmp1 = SelectExpr(Node->getOperand(0));
2644 if (X86ScalarSSE) {
2645 Opc = (N.getValueType() == MVT::f32) ? X86::SQRTSSrr : X86::SQRTSDrr;
2646 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2647 } else {
2648 BuildMI(BB, X86::FSQRT, 1, Result).addReg(Tmp1);
2649 }
2650 return Result;
2651
2652 // FIXME:
2653 // Once we can spill 16 byte constants into the constant pool, we can
2654 // implement SSE equivalents of FABS and FCHS.
Chris Lattnerb7edaa12005-04-02 05:30:17 +00002655 case ISD::FABS:
Chris Lattnerb7edaa12005-04-02 05:30:17 +00002656 case ISD::FNEG:
Chris Lattnerc5dcb532005-04-30 04:25:35 +00002657 case ISD::FSIN:
2658 case ISD::FCOS:
Chris Lattner2c56e8a2005-04-28 22:07:18 +00002659 assert(N.getValueType()==MVT::f64 && "Illegal type for this operation");
Chris Lattnerb7edaa12005-04-02 05:30:17 +00002660 Tmp1 = SelectExpr(Node->getOperand(0));
Chris Lattner2c56e8a2005-04-28 22:07:18 +00002661 switch (N.getOpcode()) {
2662 default: assert(0 && "Unreachable!");
2663 case ISD::FABS: BuildMI(BB, X86::FABS, 1, Result).addReg(Tmp1); break;
2664 case ISD::FNEG: BuildMI(BB, X86::FCHS, 1, Result).addReg(Tmp1); break;
Chris Lattnerc5dcb532005-04-30 04:25:35 +00002665 case ISD::FSIN: BuildMI(BB, X86::FSIN, 1, Result).addReg(Tmp1); break;
2666 case ISD::FCOS: BuildMI(BB, X86::FCOS, 1, Result).addReg(Tmp1); break;
Chris Lattner2c56e8a2005-04-28 22:07:18 +00002667 }
Chris Lattnerb7edaa12005-04-02 05:30:17 +00002668 return Result;
2669
Chris Lattner8db0af12005-04-06 04:21:07 +00002670 case ISD::MULHU:
2671 switch (N.getValueType()) {
2672 default: assert(0 && "Unsupported VT!");
2673 case MVT::i8: Tmp2 = X86::MUL8r; break;
2674 case MVT::i16: Tmp2 = X86::MUL16r; break;
2675 case MVT::i32: Tmp2 = X86::MUL32r; break;
2676 }
2677 // FALL THROUGH
2678 case ISD::MULHS: {
2679 unsigned MovOpc, LowReg, HiReg;
2680 switch (N.getValueType()) {
2681 default: assert(0 && "Unsupported VT!");
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002682 case MVT::i8:
Chris Lattner8db0af12005-04-06 04:21:07 +00002683 MovOpc = X86::MOV8rr;
2684 LowReg = X86::AL;
2685 HiReg = X86::AH;
2686 Opc = X86::IMUL8r;
2687 break;
2688 case MVT::i16:
2689 MovOpc = X86::MOV16rr;
2690 LowReg = X86::AX;
2691 HiReg = X86::DX;
2692 Opc = X86::IMUL16r;
2693 break;
2694 case MVT::i32:
2695 MovOpc = X86::MOV32rr;
2696 LowReg = X86::EAX;
2697 HiReg = X86::EDX;
2698 Opc = X86::IMUL32r;
2699 break;
2700 }
2701 if (Node->getOpcode() != ISD::MULHS)
2702 Opc = Tmp2; // Get the MULHU opcode.
2703
2704 Op0 = Node->getOperand(0);
2705 Op1 = Node->getOperand(1);
2706 if (getRegPressure(Op0) > getRegPressure(Op1)) {
2707 Tmp1 = SelectExpr(Op0);
2708 Tmp2 = SelectExpr(Op1);
2709 } else {
2710 Tmp2 = SelectExpr(Op1);
2711 Tmp1 = SelectExpr(Op0);
2712 }
2713
2714 // FIXME: Implement folding of loads into the memory operands here!
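    // The one-operand MUL/IMUL forms implicitly multiply AL/AX/EAX by the given
    // register and leave the high half of the product in AH/DX/EDX, so we copy
    // one input into the low register, multiply by the other, and copy the high
    // register out as the result.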
2715 BuildMI(BB, MovOpc, 1, LowReg).addReg(Tmp1);
2716 BuildMI(BB, Opc, 1).addReg(Tmp2);
2717 BuildMI(BB, MovOpc, 1, Result).addReg(HiReg);
2718 return Result;
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002719 }
Chris Lattner8db0af12005-04-06 04:21:07 +00002720
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002721 case ISD::SUB:
Chris Lattnera5ade062005-01-11 21:19:59 +00002722 case ISD::MUL:
2723 case ISD::AND:
2724 case ISD::OR:
Chris Lattnera56cea42005-01-12 04:23:22 +00002725 case ISD::XOR: {
Chris Lattnera5ade062005-01-11 21:19:59 +00002726 static const unsigned SUBTab[] = {
2727 X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, 0,
2728 X86::SUB8rm, X86::SUB16rm, X86::SUB32rm, X86::FSUB32m, X86::FSUB64m,
2729 X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB , X86::FpSUB,
2730 };
Nate Begemanf63be7d2005-07-06 18:59:04 +00002731 static const unsigned SSE_SUBTab[] = {
2732 X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, 0,
2733 X86::SUB8rm, X86::SUB16rm, X86::SUB32rm, X86::SUBSSrm, X86::SUBSDrm,
2734 X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::SUBSSrr, X86::SUBSDrr,
2735 };
Chris Lattnera5ade062005-01-11 21:19:59 +00002736 static const unsigned MULTab[] = {
2737 0, X86::IMUL16rri, X86::IMUL32rri, 0, 0,
2738 0, X86::IMUL16rm , X86::IMUL32rm, X86::FMUL32m, X86::FMUL64m,
2739 0, X86::IMUL16rr , X86::IMUL32rr, X86::FpMUL , X86::FpMUL,
2740 };
Nate Begemanf63be7d2005-07-06 18:59:04 +00002741 static const unsigned SSE_MULTab[] = {
2742 0, X86::IMUL16rri, X86::IMUL32rri, 0, 0,
2743 0, X86::IMUL16rm , X86::IMUL32rm, X86::MULSSrm, X86::MULSDrm,
2744 0, X86::IMUL16rr , X86::IMUL32rr, X86::MULSSrr, X86::MULSDrr,
2745 };
Chris Lattnera5ade062005-01-11 21:19:59 +00002746 static const unsigned ANDTab[] = {
2747 X86::AND8ri, X86::AND16ri, X86::AND32ri, 0, 0,
2748 X86::AND8rm, X86::AND16rm, X86::AND32rm, 0, 0,
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002749 X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, 0,
Chris Lattnera5ade062005-01-11 21:19:59 +00002750 };
2751 static const unsigned ORTab[] = {
2752 X86::OR8ri, X86::OR16ri, X86::OR32ri, 0, 0,
2753 X86::OR8rm, X86::OR16rm, X86::OR32rm, 0, 0,
2754 X86::OR8rr, X86::OR16rr, X86::OR32rr, 0, 0,
2755 };
2756 static const unsigned XORTab[] = {
2757 X86::XOR8ri, X86::XOR16ri, X86::XOR32ri, 0, 0,
2758 X86::XOR8rm, X86::XOR16rm, X86::XOR32rm, 0, 0,
2759 X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0, 0,
2760 };
2761
2762 Op0 = Node->getOperand(0);
2763 Op1 = Node->getOperand(1);
2764
Chris Lattner30ea1e92005-01-19 07:37:26 +00002765 if (Node->getOpcode() == ISD::OR && Op0.hasOneUse() && Op1.hasOneUse())
2766 if (EmitOrOpOp(Op0, Op1, Result)) // Match SHLD, SHRD, and rotates.
Chris Lattner85716372005-01-19 06:18:43 +00002767 return Result;
2768
2769 if (Node->getOpcode() == ISD::SUB)
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002770 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(0)))
2771 if (CN->isNullValue()) { // 0 - N -> neg N
2772 switch (N.getValueType()) {
2773 default: assert(0 && "Cannot sub this type!");
2774 case MVT::i1:
2775 case MVT::i8: Opc = X86::NEG8r; break;
2776 case MVT::i16: Opc = X86::NEG16r; break;
2777 case MVT::i32: Opc = X86::NEG32r; break;
2778 }
2779 Tmp1 = SelectExpr(N.getOperand(1));
2780 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2781 return Result;
2782 }
2783
Chris Lattnera5ade062005-01-11 21:19:59 +00002784 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
2785 if (CN->isAllOnesValue() && Node->getOpcode() == ISD::XOR) {
Chris Lattnerc98279d2005-01-17 00:23:16 +00002786 Opc = 0;
Chris Lattnerd4dab922005-01-11 04:31:30 +00002787 switch (N.getValueType()) {
2788          default: assert(0 && "Cannot invert this type!");
Chris Lattnerc98279d2005-01-17 00:23:16 +00002789 case MVT::i1: break; // Not supported, don't invert upper bits!
Chris Lattnerd4dab922005-01-11 04:31:30 +00002790 case MVT::i8: Opc = X86::NOT8r; break;
2791 case MVT::i16: Opc = X86::NOT16r; break;
2792 case MVT::i32: Opc = X86::NOT32r; break;
2793 }
Chris Lattnerc98279d2005-01-17 00:23:16 +00002794 if (Opc) {
2795 Tmp1 = SelectExpr(Op0);
2796 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2797 return Result;
2798 }
Chris Lattnerd4dab922005-01-11 04:31:30 +00002799 }
2800
Chris Lattner2a4e5082005-01-17 06:48:02 +00002801 // Fold common multiplies into LEA instructions.
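      // A multiply by 3, 5, or 9 maps onto scaled-index addressing, e.g.
      //   x*5  ->  leal (%x,%x,4), %dst
      // so it can be emitted as a single LEA below.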
2802 if (Node->getOpcode() == ISD::MUL && N.getValueType() == MVT::i32) {
2803 switch ((int)CN->getValue()) {
2804 default: break;
2805 case 3:
2806 case 5:
2807 case 9:
Chris Lattner2a4e5082005-01-17 06:48:02 +00002808 // Remove N from exprmap so SelectAddress doesn't get confused.
2809 ExprMap.erase(N);
Chris Lattner98a8ba02005-01-18 01:06:26 +00002810 X86AddressMode AM;
Chris Lattner2a4e5082005-01-17 06:48:02 +00002811 SelectAddress(N, AM);
2812 // Restore it to the map.
2813 ExprMap[N] = Result;
2814 addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), AM);
2815 return Result;
2816 }
2817 }
2818
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002819 switch (N.getValueType()) {
Chris Lattnerd4dab922005-01-11 04:31:30 +00002820      default: assert(0 && "Cannot operate on this type!");
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002821 case MVT::i1:
Chris Lattnera5ade062005-01-11 21:19:59 +00002822 case MVT::i8: Opc = 0; break;
2823 case MVT::i16: Opc = 1; break;
2824 case MVT::i32: Opc = 2; break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002825 }
Chris Lattnera5ade062005-01-11 21:19:59 +00002826 switch (Node->getOpcode()) {
2827 default: assert(0 && "Unreachable!");
Nate Begemanf63be7d2005-07-06 18:59:04 +00002828 case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
2829 case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
Chris Lattnera5ade062005-01-11 21:19:59 +00002830 case ISD::AND: Opc = ANDTab[Opc]; break;
2831 case ISD::OR: Opc = ORTab[Opc]; break;
2832 case ISD::XOR: Opc = XORTab[Opc]; break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002833 }
Chris Lattnera5ade062005-01-11 21:19:59 +00002834 if (Opc) { // Can't fold MUL:i8 R, imm
2835 Tmp1 = SelectExpr(Op0);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002836 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
2837 return Result;
2838 }
2839 }
Chris Lattner11333092005-01-11 03:11:44 +00002840
Chris Lattner44129b52005-01-25 20:03:11 +00002841 if (isFoldableLoad(Op0, Op1, true))
Chris Lattnera5ade062005-01-11 21:19:59 +00002842 if (Node->getOpcode() != ISD::SUB) {
2843 std::swap(Op0, Op1);
Chris Lattner4ff348b2005-01-17 06:26:58 +00002844 goto FoldOps;
Chris Lattnera5ade062005-01-11 21:19:59 +00002845 } else {
Chris Lattner44129b52005-01-25 20:03:11 +00002846        // For FP, emit a 'reverse' subtract with a memory operand.
Nate Begemanf63be7d2005-07-06 18:59:04 +00002847 if (N.getValueType() == MVT::f64 && !X86ScalarSSE) {
Chris Lattner44129b52005-01-25 20:03:11 +00002848 if (Op0.getOpcode() == ISD::EXTLOAD)
2849 Opc = X86::FSUBR32m;
2850 else
2851 Opc = X86::FSUBR64m;
2852
Chris Lattnera5ade062005-01-11 21:19:59 +00002853 X86AddressMode AM;
Chris Lattner636e79a2005-01-13 05:53:16 +00002854 EmitFoldedLoad(Op0, AM);
2855 Tmp1 = SelectExpr(Op1);
Chris Lattnera5ade062005-01-11 21:19:59 +00002856 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
2857 return Result;
2858 }
2859 }
2860
Chris Lattner44129b52005-01-25 20:03:11 +00002861 if (isFoldableLoad(Op1, Op0, true)) {
Chris Lattner4ff348b2005-01-17 06:26:58 +00002862 FoldOps:
Chris Lattnera5ade062005-01-11 21:19:59 +00002863 switch (N.getValueType()) {
2864 default: assert(0 && "Cannot operate on this type!");
2865 case MVT::i1:
2866 case MVT::i8: Opc = 5; break;
2867 case MVT::i16: Opc = 6; break;
2868 case MVT::i32: Opc = 7; break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00002869 case MVT::f32: Opc = 8; break;
Chris Lattner44129b52005-01-25 20:03:11 +00002870 // For F64, handle promoted load operations (from F32) as well!
Jeff Cohen00b168892005-07-27 06:12:32 +00002871 case MVT::f64:
2872 assert((!X86ScalarSSE || Op1.getOpcode() == ISD::LOAD) &&
Nate Begemanf63be7d2005-07-06 18:59:04 +00002873 "SSE load should have been promoted");
2874 Opc = Op1.getOpcode() == ISD::LOAD ? 9 : 8; break;
Chris Lattnera5ade062005-01-11 21:19:59 +00002875 }
2876 switch (Node->getOpcode()) {
2877 default: assert(0 && "Unreachable!");
Nate Begemanf63be7d2005-07-06 18:59:04 +00002878 case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
2879 case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
Chris Lattnera5ade062005-01-11 21:19:59 +00002880 case ISD::AND: Opc = ANDTab[Opc]; break;
2881 case ISD::OR: Opc = ORTab[Opc]; break;
2882 case ISD::XOR: Opc = XORTab[Opc]; break;
2883 }
2884
2885 X86AddressMode AM;
Chris Lattner636e79a2005-01-13 05:53:16 +00002886 EmitFoldedLoad(Op1, AM);
2887 Tmp1 = SelectExpr(Op0);
Chris Lattnera5ade062005-01-11 21:19:59 +00002888 if (Opc) {
2889 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
2890 } else {
2891 assert(Node->getOpcode() == ISD::MUL &&
2892 N.getValueType() == MVT::i8 && "Unexpected situation!");
2893 // Must use the MUL instruction, which forces use of AL.
2894 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
2895 addFullAddress(BuildMI(BB, X86::MUL8m, 1), AM);
2896 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
2897 }
2898 return Result;
Chris Lattner11333092005-01-11 03:11:44 +00002899 }
Chris Lattnera5ade062005-01-11 21:19:59 +00002900
2901 if (getRegPressure(Op0) > getRegPressure(Op1)) {
2902 Tmp1 = SelectExpr(Op0);
2903 Tmp2 = SelectExpr(Op1);
2904 } else {
2905 Tmp2 = SelectExpr(Op1);
2906 Tmp1 = SelectExpr(Op0);
2907 }
2908
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002909 switch (N.getValueType()) {
2910      default: assert(0 && "Cannot operate on this type!");
Chris Lattnera5ade062005-01-11 21:19:59 +00002911 case MVT::i1:
2912 case MVT::i8: Opc = 10; break;
2913 case MVT::i16: Opc = 11; break;
2914 case MVT::i32: Opc = 12; break;
2915 case MVT::f32: Opc = 13; break;
2916 case MVT::f64: Opc = 14; break;
2917 }
2918 switch (Node->getOpcode()) {
2919 default: assert(0 && "Unreachable!");
Nate Begemanf63be7d2005-07-06 18:59:04 +00002920 case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
2921 case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
Chris Lattnera5ade062005-01-11 21:19:59 +00002922 case ISD::AND: Opc = ANDTab[Opc]; break;
2923 case ISD::OR: Opc = ORTab[Opc]; break;
2924 case ISD::XOR: Opc = XORTab[Opc]; break;
2925 }
2926 if (Opc) {
2927 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
2928 } else {
2929 assert(Node->getOpcode() == ISD::MUL &&
2930 N.getValueType() == MVT::i8 && "Unexpected situation!");
Chris Lattnera13d3232005-01-10 20:55:48 +00002931 // Must use the MUL instruction, which forces use of AL.
2932 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
2933 BuildMI(BB, X86::MUL8r, 1).addReg(Tmp2);
2934 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002935 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00002936 return Result;
Chris Lattnera56cea42005-01-12 04:23:22 +00002937 }
Chris Lattner19ad0622005-01-20 18:53:00 +00002938 case ISD::ADD_PARTS:
2939 case ISD::SUB_PARTS: {
2940 assert(N.getNumOperands() == 4 && N.getValueType() == MVT::i32 &&
2941 "Not an i64 add/sub!");
2942 // Emit all of the operands.
2943 std::vector<unsigned> InVals;
2944 for (unsigned i = 0, e = N.getNumOperands(); i != e; ++i)
2945 InVals.push_back(SelectExpr(N.getOperand(i)));
2946 if (N.getOpcode() == ISD::ADD_PARTS) {
2947 BuildMI(BB, X86::ADD32rr, 2, Result).addReg(InVals[0]).addReg(InVals[2]);
2948 BuildMI(BB, X86::ADC32rr,2,Result+1).addReg(InVals[1]).addReg(InVals[3]);
2949 } else {
2950 BuildMI(BB, X86::SUB32rr, 2, Result).addReg(InVals[0]).addReg(InVals[2]);
2951 BuildMI(BB, X86::SBB32rr, 2,Result+1).addReg(InVals[1]).addReg(InVals[3]);
2952 }
2953 return Result+N.ResNo;
2954 }
2955
Chris Lattnerb38a7492005-04-02 04:01:14 +00002956 case ISD::SHL_PARTS:
2957 case ISD::SRA_PARTS:
2958 case ISD::SRL_PARTS: {
2959 assert(N.getNumOperands() == 3 && N.getValueType() == MVT::i32 &&
2960 "Not an i64 shift!");
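    // Variable x86 shift counts are masked to 5 bits, so a 64-bit shift by CL is
    // built from SHLD/SHRD plus one 32-bit shift; the TEST of bit 5 of CL and the
    // CMOVs below patch up the two halves when the count is 32 or more.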
2961 unsigned ShiftOpLo = SelectExpr(N.getOperand(0));
2962 unsigned ShiftOpHi = SelectExpr(N.getOperand(1));
2963 unsigned TmpReg = MakeReg(MVT::i32);
2964 if (N.getOpcode() == ISD::SRA_PARTS) {
2965      // If this is an arithmetic shift right of a long, the upper half must be
2966      // filled with copies of the sign bit. TmpReg gets the value to use as the
2967      // high part if we are shifting by 32 or more bits.
2968 BuildMI(BB, X86::SAR32ri, 2, TmpReg).addReg(ShiftOpHi).addImm(31);
2969 } else {
2970      // Other shifts use a fixed zero value if the shift is by 32 or more bits.
2971 BuildMI(BB, X86::MOV32ri, 1, TmpReg).addImm(0);
2972 }
2973
2974 // Initialize CL with the shift amount.
2975 unsigned ShiftAmountReg = SelectExpr(N.getOperand(2));
2976 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
2977
2978 unsigned TmpReg2 = MakeReg(MVT::i32);
2979 unsigned TmpReg3 = MakeReg(MVT::i32);
2980 if (N.getOpcode() == ISD::SHL_PARTS) {
2981 // TmpReg2 = shld inHi, inLo
2982 BuildMI(BB, X86::SHLD32rrCL, 2,TmpReg2).addReg(ShiftOpHi)
2983 .addReg(ShiftOpLo);
2984 // TmpReg3 = shl inLo, CL
2985 BuildMI(BB, X86::SHL32rCL, 1, TmpReg3).addReg(ShiftOpLo);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002986
Chris Lattnerb38a7492005-04-02 04:01:14 +00002987      // Set the flags to indicate whether the shift count is 32 or more bits.
2988 BuildMI(BB, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002989
Chris Lattnerb38a7492005-04-02 04:01:14 +00002990 // DestHi = (>32) ? TmpReg3 : TmpReg2;
Misha Brukman0e0a7a452005-04-21 23:38:14 +00002991 BuildMI(BB, X86::CMOVNE32rr, 2,
Chris Lattnerb38a7492005-04-02 04:01:14 +00002992 Result+1).addReg(TmpReg2).addReg(TmpReg3);
2993 // DestLo = (>32) ? TmpReg : TmpReg3;
2994 BuildMI(BB, X86::CMOVNE32rr, 2,
2995 Result).addReg(TmpReg3).addReg(TmpReg);
2996 } else {
2997 // TmpReg2 = shrd inLo, inHi
2998 BuildMI(BB, X86::SHRD32rrCL,2,TmpReg2).addReg(ShiftOpLo)
2999 .addReg(ShiftOpHi);
3000 // TmpReg3 = s[ah]r inHi, CL
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003001 BuildMI(BB, N.getOpcode() == ISD::SRA_PARTS ? X86::SAR32rCL
Chris Lattnerb38a7492005-04-02 04:01:14 +00003002 : X86::SHR32rCL, 1, TmpReg3)
3003 .addReg(ShiftOpHi);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003004
Chris Lattnerb38a7492005-04-02 04:01:14 +00003005      // Set the flags to indicate whether the shift count is 32 or more bits.
3006 BuildMI(BB, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003007
Chris Lattnerb38a7492005-04-02 04:01:14 +00003008 // DestLo = (>32) ? TmpReg3 : TmpReg2;
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003009 BuildMI(BB, X86::CMOVNE32rr, 2,
Chris Lattnerb38a7492005-04-02 04:01:14 +00003010 Result).addReg(TmpReg2).addReg(TmpReg3);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003011
Chris Lattnerb38a7492005-04-02 04:01:14 +00003012 // DestHi = (>32) ? TmpReg : TmpReg3;
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003013 BuildMI(BB, X86::CMOVNE32rr, 2,
Chris Lattnerb38a7492005-04-02 04:01:14 +00003014 Result+1).addReg(TmpReg3).addReg(TmpReg);
3015 }
3016 return Result+N.ResNo;
3017 }
3018
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003019 case ISD::SELECT:
Chris Lattnerda2ce112005-01-16 07:34:08 +00003020 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
3021 Tmp2 = SelectExpr(N.getOperand(1));
3022 Tmp3 = SelectExpr(N.getOperand(2));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003023 } else {
Chris Lattnerda2ce112005-01-16 07:34:08 +00003024 Tmp3 = SelectExpr(N.getOperand(2));
3025 Tmp2 = SelectExpr(N.getOperand(1));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003026 }
Chris Lattnerda2ce112005-01-16 07:34:08 +00003027 EmitSelectCC(N.getOperand(0), N.getValueType(), Tmp2, Tmp3, Result);
3028 return Result;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003029
3030 case ISD::SDIV:
3031 case ISD::UDIV:
3032 case ISD::SREM:
3033 case ISD::UREM: {
Chris Lattnerda2ce112005-01-16 07:34:08 +00003034 assert((N.getOpcode() != ISD::SREM || MVT::isInteger(N.getValueType())) &&
3035 "We don't support this operator!");
3036
Chris Lattner5bf26862005-04-13 03:29:53 +00003037 if (N.getOpcode() == ISD::SDIV) {
Chris Lattner3576c842005-01-25 20:35:10 +00003038 // We can fold loads into FpDIVs, but not really into any others.
Nate Begemanb8aa3ac2005-07-07 06:32:01 +00003039 if (N.getValueType() == MVT::f64 && !X86ScalarSSE) {
Chris Lattner3576c842005-01-25 20:35:10 +00003040 // Check for reversed and unreversed DIV.
3041 if (isFoldableLoad(N.getOperand(0), N.getOperand(1), true)) {
3042 if (N.getOperand(0).getOpcode() == ISD::EXTLOAD)
3043 Opc = X86::FDIVR32m;
3044 else
3045 Opc = X86::FDIVR64m;
3046 X86AddressMode AM;
3047 EmitFoldedLoad(N.getOperand(0), AM);
3048 Tmp1 = SelectExpr(N.getOperand(1));
3049 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
3050 return Result;
3051 } else if (isFoldableLoad(N.getOperand(1), N.getOperand(0), true) &&
3052 N.getOperand(1).getOpcode() == ISD::LOAD) {
3053 if (N.getOperand(1).getOpcode() == ISD::EXTLOAD)
3054 Opc = X86::FDIV32m;
3055 else
3056 Opc = X86::FDIV64m;
3057 X86AddressMode AM;
3058 EmitFoldedLoad(N.getOperand(1), AM);
3059 Tmp1 = SelectExpr(N.getOperand(0));
3060 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
3061 return Result;
3062 }
3063 }
3064
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003065 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3066 // FIXME: These special cases should be handled by the lowering impl!
3067 unsigned RHS = CN->getValue();
3068 bool isNeg = false;
3069 if ((int)RHS < 0) {
3070 isNeg = true;
3071 RHS = -RHS;
3072 }
3073 if (RHS && (RHS & (RHS-1)) == 0) { // Signed division by power of 2?
3074 unsigned Log = log2(RHS);
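            // Illustrative trace for i32, X = -7, RHS = 4 (so Log = 2):
            //   t1 = X >>s (Log-1)    = -4 (0xFFFFFFFC)
            //   t2 = t1 >>u (32-Log)  =  3   (2^Log-1 when X is negative, else 0)
            //   t3 = X + t2           = -4
            //   result = t3 >>s Log   = -1   = trunc(-7/4)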
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003075 unsigned SAROpc, SHROpc, ADDOpc, NEGOpc;
3076 switch (N.getValueType()) {
3077              default: assert(0 && "Unknown type to signed divide!");
3078 case MVT::i8:
3079 SAROpc = X86::SAR8ri;
3080 SHROpc = X86::SHR8ri;
3081 ADDOpc = X86::ADD8rr;
3082 NEGOpc = X86::NEG8r;
3083 break;
3084 case MVT::i16:
3085 SAROpc = X86::SAR16ri;
3086 SHROpc = X86::SHR16ri;
3087 ADDOpc = X86::ADD16rr;
3088 NEGOpc = X86::NEG16r;
3089 break;
3090 case MVT::i32:
3091 SAROpc = X86::SAR32ri;
3092 SHROpc = X86::SHR32ri;
3093 ADDOpc = X86::ADD32rr;
3094 NEGOpc = X86::NEG32r;
3095 break;
3096 }
Chris Lattnera96e5772005-05-13 21:48:20 +00003097 unsigned RegSize = MVT::getSizeInBits(N.getValueType());
Chris Lattner11333092005-01-11 03:11:44 +00003098 Tmp1 = SelectExpr(N.getOperand(0));
Chris Lattnerca96c822005-05-13 21:50:27 +00003099 unsigned TmpReg;
3100 if (Log != 1) {
3101 TmpReg = MakeReg(N.getValueType());
3102 BuildMI(BB, SAROpc, 2, TmpReg).addReg(Tmp1).addImm(Log-1);
3103 } else {
3104 TmpReg = Tmp1;
3105 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003106 unsigned TmpReg2 = MakeReg(N.getValueType());
Chris Lattnera96e5772005-05-13 21:48:20 +00003107 BuildMI(BB, SHROpc, 2, TmpReg2).addReg(TmpReg).addImm(RegSize-Log);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003108 unsigned TmpReg3 = MakeReg(N.getValueType());
3109 BuildMI(BB, ADDOpc, 2, TmpReg3).addReg(Tmp1).addReg(TmpReg2);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003110
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003111 unsigned TmpReg4 = isNeg ? MakeReg(N.getValueType()) : Result;
3112 BuildMI(BB, SAROpc, 2, TmpReg4).addReg(TmpReg3).addImm(Log);
3113 if (isNeg)
3114 BuildMI(BB, NEGOpc, 1, Result).addReg(TmpReg4);
3115 return Result;
3116 }
3117 }
Chris Lattner5bf26862005-04-13 03:29:53 +00003118 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003119
Chris Lattner11333092005-01-11 03:11:44 +00003120 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3121 Tmp1 = SelectExpr(N.getOperand(0));
3122 Tmp2 = SelectExpr(N.getOperand(1));
3123 } else {
3124 Tmp2 = SelectExpr(N.getOperand(1));
3125 Tmp1 = SelectExpr(N.getOperand(0));
3126 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003127
3128 bool isSigned = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::SREM;
3129 bool isDiv = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::UDIV;
3130 unsigned LoReg, HiReg, DivOpcode, MovOpcode, ClrOpcode, SExtOpcode;
3131 switch (N.getValueType()) {
3132 default: assert(0 && "Cannot sdiv this type!");
3133 case MVT::i8:
3134 DivOpcode = isSigned ? X86::IDIV8r : X86::DIV8r;
3135 LoReg = X86::AL;
3136 HiReg = X86::AH;
3137 MovOpcode = X86::MOV8rr;
3138 ClrOpcode = X86::MOV8ri;
3139 SExtOpcode = X86::CBW;
3140 break;
3141 case MVT::i16:
3142 DivOpcode = isSigned ? X86::IDIV16r : X86::DIV16r;
3143 LoReg = X86::AX;
3144 HiReg = X86::DX;
3145 MovOpcode = X86::MOV16rr;
3146 ClrOpcode = X86::MOV16ri;
3147 SExtOpcode = X86::CWD;
3148 break;
3149 case MVT::i32:
3150 DivOpcode = isSigned ? X86::IDIV32r : X86::DIV32r;
Chris Lattner42928302005-01-12 03:16:09 +00003151 LoReg = X86::EAX;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003152 HiReg = X86::EDX;
3153 MovOpcode = X86::MOV32rr;
3154 ClrOpcode = X86::MOV32ri;
3155 SExtOpcode = X86::CDQ;
3156 break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00003157 case MVT::f32:
3158 BuildMI(BB, X86::DIVSSrr, 2, Result).addReg(Tmp1).addReg(Tmp2);
3159 return Result;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003160 case MVT::f64:
Nate Begemanf63be7d2005-07-06 18:59:04 +00003161 Opc = X86ScalarSSE ? X86::DIVSDrr : X86::FpDIV;
3162 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003163 return Result;
3164 }
3165
3166 // Set up the low part.
3167 BuildMI(BB, MovOpcode, 1, LoReg).addReg(Tmp1);
3168
3169 if (isSigned) {
3170 // Sign extend the low part into the high part.
3171 BuildMI(BB, SExtOpcode, 0);
3172 } else {
3173 // Zero out the high part, effectively zero extending the input.
3174 BuildMI(BB, ClrOpcode, 1, HiReg).addImm(0);
3175 }
3176
3177 // Emit the DIV/IDIV instruction.
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003178 BuildMI(BB, DivOpcode, 1).addReg(Tmp2);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003179
3180 // Get the result of the divide or rem.
3181 BuildMI(BB, MovOpcode, 1, Result).addReg(isDiv ? LoReg : HiReg);
3182 return Result;
3183 }
3184
3185 case ISD::SHL:
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003186 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
Chris Lattnera5ade062005-01-11 21:19:59 +00003187 if (CN->getValue() == 1) { // X = SHL Y, 1 -> X = ADD Y, Y
3188 switch (N.getValueType()) {
3189 default: assert(0 && "Cannot shift this type!");
3190 case MVT::i8: Opc = X86::ADD8rr; break;
3191 case MVT::i16: Opc = X86::ADD16rr; break;
3192 case MVT::i32: Opc = X86::ADD32rr; break;
3193 }
3194 Tmp1 = SelectExpr(N.getOperand(0));
3195 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp1);
3196 return Result;
3197 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003198
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003199 switch (N.getValueType()) {
3200 default: assert(0 && "Cannot shift this type!");
3201 case MVT::i8: Opc = X86::SHL8ri; break;
3202 case MVT::i16: Opc = X86::SHL16ri; break;
3203 case MVT::i32: Opc = X86::SHL32ri; break;
3204 }
Chris Lattner11333092005-01-11 03:11:44 +00003205 Tmp1 = SelectExpr(N.getOperand(0));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003206 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
3207 return Result;
3208 }
Chris Lattner11333092005-01-11 03:11:44 +00003209
3210 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3211 Tmp1 = SelectExpr(N.getOperand(0));
3212 Tmp2 = SelectExpr(N.getOperand(1));
3213 } else {
3214 Tmp2 = SelectExpr(N.getOperand(1));
3215 Tmp1 = SelectExpr(N.getOperand(0));
3216 }
3217
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003218 switch (N.getValueType()) {
3219 default: assert(0 && "Cannot shift this type!");
3220 case MVT::i8 : Opc = X86::SHL8rCL; break;
3221 case MVT::i16: Opc = X86::SHL16rCL; break;
3222 case MVT::i32: Opc = X86::SHL32rCL; break;
3223 }
3224 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
3225 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
3226 return Result;
3227 case ISD::SRL:
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003228 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3229 switch (N.getValueType()) {
3230 default: assert(0 && "Cannot shift this type!");
3231 case MVT::i8: Opc = X86::SHR8ri; break;
3232 case MVT::i16: Opc = X86::SHR16ri; break;
3233 case MVT::i32: Opc = X86::SHR32ri; break;
3234 }
Chris Lattner11333092005-01-11 03:11:44 +00003235 Tmp1 = SelectExpr(N.getOperand(0));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003236 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
3237 return Result;
3238 }
Chris Lattner11333092005-01-11 03:11:44 +00003239
3240 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3241 Tmp1 = SelectExpr(N.getOperand(0));
3242 Tmp2 = SelectExpr(N.getOperand(1));
3243 } else {
3244 Tmp2 = SelectExpr(N.getOperand(1));
3245 Tmp1 = SelectExpr(N.getOperand(0));
3246 }
3247
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003248 switch (N.getValueType()) {
3249 default: assert(0 && "Cannot shift this type!");
3250 case MVT::i8 : Opc = X86::SHR8rCL; break;
3251 case MVT::i16: Opc = X86::SHR16rCL; break;
3252 case MVT::i32: Opc = X86::SHR32rCL; break;
3253 }
3254 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
3255 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
3256 return Result;
3257 case ISD::SRA:
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003258 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3259 switch (N.getValueType()) {
3260 default: assert(0 && "Cannot shift this type!");
3261 case MVT::i8: Opc = X86::SAR8ri; break;
3262 case MVT::i16: Opc = X86::SAR16ri; break;
3263 case MVT::i32: Opc = X86::SAR32ri; break;
3264 }
Chris Lattner11333092005-01-11 03:11:44 +00003265 Tmp1 = SelectExpr(N.getOperand(0));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003266 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
3267 return Result;
3268 }
Chris Lattner11333092005-01-11 03:11:44 +00003269
3270 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3271 Tmp1 = SelectExpr(N.getOperand(0));
3272 Tmp2 = SelectExpr(N.getOperand(1));
3273 } else {
3274 Tmp2 = SelectExpr(N.getOperand(1));
3275 Tmp1 = SelectExpr(N.getOperand(0));
3276 }
3277
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003278 switch (N.getValueType()) {
3279 default: assert(0 && "Cannot shift this type!");
3280 case MVT::i8 : Opc = X86::SAR8rCL; break;
3281 case MVT::i16: Opc = X86::SAR16rCL; break;
3282 case MVT::i32: Opc = X86::SAR32rCL; break;
3283 }
3284 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
3285 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
3286 return Result;
3287
3288 case ISD::SETCC:
Chris Lattnercb1aa8d2005-01-17 01:34:14 +00003289 EmitCMP(N.getOperand(0), N.getOperand(1), Node->hasOneUse());
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003290 EmitSetCC(BB, Result, cast<SetCCSDNode>(N)->getCondition(),
3291 MVT::isFloatingPoint(N.getOperand(1).getValueType()));
3292 return Result;
Chris Lattnere9ef81d2005-01-15 05:22:24 +00003293 case ISD::LOAD:
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003294 // Make sure we generate both values.
Chris Lattner4a108662005-01-18 03:51:59 +00003295 if (Result != 1) { // Generate the token
3296 if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
3297 assert(0 && "Load already emitted!?");
3298 } else
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003299 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3300
Chris Lattner5188ad72005-01-08 19:28:19 +00003301 switch (Node->getValueType(0)) {
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003302 default: assert(0 && "Cannot load this type!");
3303 case MVT::i1:
3304 case MVT::i8: Opc = X86::MOV8rm; break;
3305 case MVT::i16: Opc = X86::MOV16rm; break;
3306 case MVT::i32: Opc = X86::MOV32rm; break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00003307 case MVT::f32: Opc = X86::MOVSSrm; break;
Jeff Cohen00b168892005-07-27 06:12:32 +00003308 case MVT::f64:
Nate Begemanf63be7d2005-07-06 18:59:04 +00003309 if (X86ScalarSSE) {
3310 Opc = X86::MOVSDrm;
3311 } else {
3312 Opc = X86::FLD64m;
Jeff Cohen00b168892005-07-27 06:12:32 +00003313 ContainsFPCode = true;
Nate Begemanf63be7d2005-07-06 18:59:04 +00003314 }
3315 break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003316 }
Chris Lattner11333092005-01-11 03:11:44 +00003317
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003318 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1))){
Chris Lattner11333092005-01-11 03:11:44 +00003319 Select(N.getOperand(0));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003320 addConstantPoolReference(BuildMI(BB, Opc, 4, Result), CP->getIndex());
3321 } else {
3322 X86AddressMode AM;
Chris Lattner636e79a2005-01-13 05:53:16 +00003323
3324 SDOperand Chain = N.getOperand(0);
3325 SDOperand Address = N.getOperand(1);
3326 if (getRegPressure(Chain) > getRegPressure(Address)) {
3327 Select(Chain);
3328 SelectAddress(Address, AM);
3329 } else {
3330 SelectAddress(Address, AM);
3331 Select(Chain);
3332 }
3333
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003334 addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
3335 }
3336 return Result;
Chris Lattner67649df2005-05-14 06:52:07 +00003337 case X86ISD::FILD64m:
3338 // Make sure we generate both values.
3339 assert(Result != 1 && N.getValueType() == MVT::f64);
3340 if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
3341 assert(0 && "Load already emitted!?");
3342
3343 {
3344 X86AddressMode AM;
3345
3346 SDOperand Chain = N.getOperand(0);
3347 SDOperand Address = N.getOperand(1);
3348 if (getRegPressure(Chain) > getRegPressure(Address)) {
3349 Select(Chain);
3350 SelectAddress(Address, AM);
3351 } else {
3352 SelectAddress(Address, AM);
3353 Select(Chain);
3354 }
Chris Lattner745d5382005-07-29 00:40:01 +00003355
3356 addFullAddress(BuildMI(BB, X86::FILD64m, 4, Result), AM);
Chris Lattner67649df2005-05-14 06:52:07 +00003357 }
3358 return Result;
Chris Lattner745d5382005-07-29 00:40:01 +00003359
Chris Lattnere9ef81d2005-01-15 05:22:24 +00003360 case ISD::EXTLOAD: // Arbitrarily codegen extloads as MOVZX*
3361 case ISD::ZEXTLOAD: {
3362 // Make sure we generate both values.
3363 if (Result != 1)
3364 ExprMap[N.getValue(1)] = 1; // Generate the token
3365 else
3366 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3367
Chris Lattnerda2ce112005-01-16 07:34:08 +00003368 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1)))
3369 if (Node->getValueType(0) == MVT::f64) {
Chris Lattnerbce81ae2005-07-10 01:56:13 +00003370 assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::f32 &&
Chris Lattnerda2ce112005-01-16 07:34:08 +00003371 "Bad EXTLOAD!");
3372 addConstantPoolReference(BuildMI(BB, X86::FLD32m, 4, Result),
3373 CP->getIndex());
3374 return Result;
3375 }
3376
Chris Lattnere9ef81d2005-01-15 05:22:24 +00003377 X86AddressMode AM;
3378 if (getRegPressure(Node->getOperand(0)) >
3379 getRegPressure(Node->getOperand(1))) {
3380 Select(Node->getOperand(0)); // chain
3381 SelectAddress(Node->getOperand(1), AM);
3382 } else {
3383 SelectAddress(Node->getOperand(1), AM);
3384 Select(Node->getOperand(0)); // chain
3385 }
3386
3387 switch (Node->getValueType(0)) {
3388      default: assert(0 && "Unknown type to zero extend to.");
3389 case MVT::f64:
Chris Lattnerbce81ae2005-07-10 01:56:13 +00003390 assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::f32 &&
Chris Lattnere9ef81d2005-01-15 05:22:24 +00003391 "Bad EXTLOAD!");
3392 addFullAddress(BuildMI(BB, X86::FLD32m, 5, Result), AM);
3393 break;
3394 case MVT::i32:
Chris Lattnerbce81ae2005-07-10 01:56:13 +00003395 switch (cast<VTSDNode>(Node->getOperand(3))->getVT()) {
Chris Lattnere9ef81d2005-01-15 05:22:24 +00003396 default:
3397 assert(0 && "Bad zero extend!");
3398 case MVT::i1:
3399 case MVT::i8:
3400 addFullAddress(BuildMI(BB, X86::MOVZX32rm8, 5, Result), AM);
3401 break;
3402 case MVT::i16:
3403 addFullAddress(BuildMI(BB, X86::MOVZX32rm16, 5, Result), AM);
3404 break;
3405 }
3406 break;
3407 case MVT::i16:
Chris Lattnerbce81ae2005-07-10 01:56:13 +00003408 assert(cast<VTSDNode>(Node->getOperand(3))->getVT() <= MVT::i8 &&
Chris Lattnere9ef81d2005-01-15 05:22:24 +00003409 "Bad zero extend!");
3410 addFullAddress(BuildMI(BB, X86::MOVSX16rm8, 5, Result), AM);
3411 break;
3412 case MVT::i8:
Chris Lattnerbce81ae2005-07-10 01:56:13 +00003413 assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::i1 &&
Chris Lattnere9ef81d2005-01-15 05:22:24 +00003414 "Bad zero extend!");
3415 addFullAddress(BuildMI(BB, X86::MOV8rm, 5, Result), AM);
3416 break;
3417 }
3418 return Result;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003419 }
Chris Lattnere9ef81d2005-01-15 05:22:24 +00003420 case ISD::SEXTLOAD: {
3421 // Make sure we generate both values.
3422 if (Result != 1)
3423 ExprMap[N.getValue(1)] = 1; // Generate the token
3424 else
3425 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3426
3427 X86AddressMode AM;
3428 if (getRegPressure(Node->getOperand(0)) >
3429 getRegPressure(Node->getOperand(1))) {
3430 Select(Node->getOperand(0)); // chain
3431 SelectAddress(Node->getOperand(1), AM);
3432 } else {
3433 SelectAddress(Node->getOperand(1), AM);
3434 Select(Node->getOperand(0)); // chain
3435 }
3436
3437 switch (Node->getValueType(0)) {
3438 case MVT::i8: assert(0 && "Cannot sign extend from bool!");
3439 default: assert(0 && "Unknown type to sign extend to.");
3440 case MVT::i32:
Chris Lattnerbce81ae2005-07-10 01:56:13 +00003441 switch (cast<VTSDNode>(Node->getOperand(3))->getVT()) {
Chris Lattnere9ef81d2005-01-15 05:22:24 +00003442 default:
3443 case MVT::i1: assert(0 && "Cannot sign extend from bool!");
3444 case MVT::i8:
3445 addFullAddress(BuildMI(BB, X86::MOVSX32rm8, 5, Result), AM);
3446 break;
3447 case MVT::i16:
3448 addFullAddress(BuildMI(BB, X86::MOVSX32rm16, 5, Result), AM);
3449 break;
3450 }
3451 break;
3452 case MVT::i16:
Chris Lattnerbce81ae2005-07-10 01:56:13 +00003453 assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::i8 &&
Chris Lattnere9ef81d2005-01-15 05:22:24 +00003454 "Cannot sign extend from bool!");
3455 addFullAddress(BuildMI(BB, X86::MOVSX16rm8, 5, Result), AM);
3456 break;
3457 }
3458 return Result;
3459 }
3460
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003461 case ISD::DYNAMIC_STACKALLOC:
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003462 // Generate both result values.
3463 if (Result != 1)
3464 ExprMap[N.getValue(1)] = 1; // Generate the token
3465 else
3466 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3467
3468    // FIXME: We currently ignore requested alignments greater than the stack
3469    // alignment. This will need to be revisited at some point.
3470    // Align = N.getOperand(2);
3471
3472 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
3473 cast<ConstantSDNode>(N.getOperand(2))->getValue() != 0) {
3474 std::cerr << "Cannot allocate stack object with greater alignment than"
3475 << " the stack alignment yet!";
3476 abort();
3477 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003478
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003479 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
Chris Lattner11333092005-01-11 03:11:44 +00003480 Select(N.getOperand(0));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003481 BuildMI(BB, X86::SUB32ri, 2, X86::ESP).addReg(X86::ESP)
3482 .addImm(CN->getValue());
3483 } else {
Chris Lattner11333092005-01-11 03:11:44 +00003484 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3485 Select(N.getOperand(0));
3486 Tmp1 = SelectExpr(N.getOperand(1));
3487 } else {
3488 Tmp1 = SelectExpr(N.getOperand(1));
3489 Select(N.getOperand(0));
3490 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003491
3492 // Subtract size from stack pointer, thereby allocating some space.
3493 BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(Tmp1);
3494 }
3495
3496 // Put a pointer to the space into the result register, by copying the stack
3497 // pointer.
3498 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::ESP);
3499 return Result;
3500
Chris Lattner239738a2005-05-14 08:48:15 +00003501 case X86ISD::TAILCALL:
3502 case X86ISD::CALL: {
Chris Lattner5188ad72005-01-08 19:28:19 +00003503 // The chain for this call is now lowered.
Chris Lattner239738a2005-05-14 08:48:15 +00003504 ExprMap.insert(std::make_pair(N.getValue(0), 1));
Chris Lattner5188ad72005-01-08 19:28:19 +00003505
Chris Lattnerc6f41812005-05-12 23:06:28 +00003506 bool isDirect = isa<GlobalAddressSDNode>(N.getOperand(1)) ||
3507 isa<ExternalSymbolSDNode>(N.getOperand(1));
3508 unsigned Callee = 0;
3509 if (isDirect) {
3510 Select(N.getOperand(0));
3511 } else {
3512 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3513 Select(N.getOperand(0));
3514 Callee = SelectExpr(N.getOperand(1));
3515 } else {
3516 Callee = SelectExpr(N.getOperand(1));
3517 Select(N.getOperand(0));
3518 }
3519 }
3520
3521 // If this call has values to pass in registers, do so now.
Chris Lattner239738a2005-05-14 08:48:15 +00003522 if (Node->getNumOperands() > 4) {
Chris Lattnerc6f41812005-05-12 23:06:28 +00003523 // The first value is passed in (a part of) EAX, the second in EDX.
Chris Lattner239738a2005-05-14 08:48:15 +00003524 unsigned RegOp1 = SelectExpr(N.getOperand(4));
Chris Lattnerc6f41812005-05-12 23:06:28 +00003525 unsigned RegOp2 =
Chris Lattner239738a2005-05-14 08:48:15 +00003526 Node->getNumOperands() > 5 ? SelectExpr(N.getOperand(5)) : 0;
Jeff Cohen00b168892005-07-27 06:12:32 +00003527
Chris Lattner239738a2005-05-14 08:48:15 +00003528 switch (N.getOperand(4).getValueType()) {
Chris Lattnerc6f41812005-05-12 23:06:28 +00003529 default: assert(0 && "Bad thing to pass in regs");
3530 case MVT::i1:
3531 case MVT::i8: BuildMI(BB, X86::MOV8rr , 1,X86::AL).addReg(RegOp1); break;
3532 case MVT::i16: BuildMI(BB, X86::MOV16rr, 1,X86::AX).addReg(RegOp1); break;
3533 case MVT::i32: BuildMI(BB, X86::MOV32rr, 1,X86::EAX).addReg(RegOp1);break;
3534 }
3535 if (RegOp2)
Chris Lattner239738a2005-05-14 08:48:15 +00003536 switch (N.getOperand(5).getValueType()) {
Chris Lattnerc6f41812005-05-12 23:06:28 +00003537 default: assert(0 && "Bad thing to pass in regs");
3538 case MVT::i1:
3539 case MVT::i8:
3540 BuildMI(BB, X86::MOV8rr , 1, X86::DL).addReg(RegOp2);
3541 break;
3542 case MVT::i16:
3543 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(RegOp2);
3544 break;
3545 case MVT::i32:
3546 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RegOp2);
3547 break;
3548 }
3549 }
3550
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003551 if (GlobalAddressSDNode *GASD =
3552 dyn_cast<GlobalAddressSDNode>(N.getOperand(1))) {
3553 BuildMI(BB, X86::CALLpcrel32, 1).addGlobalAddress(GASD->getGlobal(),true);
3554 } else if (ExternalSymbolSDNode *ESSDN =
3555 dyn_cast<ExternalSymbolSDNode>(N.getOperand(1))) {
3556 BuildMI(BB, X86::CALLpcrel32,
3557 1).addExternalSymbol(ESSDN->getSymbol(), true);
3558 } else {
Chris Lattner11333092005-01-11 03:11:44 +00003559 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3560 Select(N.getOperand(0));
3561 Tmp1 = SelectExpr(N.getOperand(1));
3562 } else {
3563 Tmp1 = SelectExpr(N.getOperand(1));
3564 Select(N.getOperand(0));
3565 }
3566
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003567 BuildMI(BB, X86::CALL32r, 1).addReg(Tmp1);
3568 }
Chris Lattner239738a2005-05-14 08:48:15 +00003569
3570 // Get caller stack amount and amount the callee added to the stack pointer.
3571 Tmp1 = cast<ConstantSDNode>(N.getOperand(2))->getValue();
3572 Tmp2 = cast<ConstantSDNode>(N.getOperand(3))->getValue();
3573 BuildMI(BB, X86::ADJCALLSTACKUP, 2).addImm(Tmp1).addImm(Tmp2);
3574
3575 if (Node->getNumValues() != 1)
3576 switch (Node->getValueType(1)) {
3577 default: assert(0 && "Unknown value type for call result!");
3578 case MVT::Other: return 1;
3579 case MVT::i1:
3580 case MVT::i8:
3581 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
3582 break;
3583 case MVT::i16:
3584 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
3585 break;
3586 case MVT::i32:
3587 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
3588 if (Node->getNumValues() == 3 && Node->getValueType(2) == MVT::i32)
3589 BuildMI(BB, X86::MOV32rr, 1, Result+1).addReg(X86::EDX);
3590 break;
3591 case MVT::f64: // Floating-point return values live in %ST(0)
Nate Begemanf63be7d2005-07-06 18:59:04 +00003592 if (X86ScalarSSE) {
3593 ContainsFPCode = true;
3594 BuildMI(BB, X86::FpGETRESULT, 1, X86::FP0);
3595
3596 unsigned Size = MVT::getSizeInBits(MVT::f64)/8;
3597 MachineFunction *F = BB->getParent();
3598 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
3599 addFrameReference(BuildMI(BB, X86::FST64m, 5), FrameIdx).addReg(X86::FP0);
3600 addFrameReference(BuildMI(BB, X86::MOVSDrm, 4, Result), FrameIdx);
3601 break;
3602 } else {
3603 ContainsFPCode = true;
3604 BuildMI(BB, X86::FpGETRESULT, 1, Result);
3605 break;
3606 }
Chris Lattner239738a2005-05-14 08:48:15 +00003607 }
3608 return Result+N.ResNo-1;
Chris Lattnerc6f41812005-05-12 23:06:28 +00003609 }
Chris Lattner966cdfb2005-05-09 21:17:38 +00003610 case ISD::READPORT:
3611 // First, determine that the size of the operand falls within the acceptable
3612 // range for this architecture.
3613 //
3614 if (Node->getOperand(1).getValueType() != MVT::i16) {
3615 std::cerr << "llvm.readport: Address size is not 16 bits\n";
3616 exit(1);
3617 }
3618
3619 // Make sure we generate both values.
3620 if (Result != 1) { // Generate the token
3621 if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
3622 assert(0 && "readport already emitted!?");
3623 } else
3624 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
Jeff Cohen00b168892005-07-27 06:12:32 +00003625
Chris Lattner966cdfb2005-05-09 21:17:38 +00003626 Select(Node->getOperand(0)); // Select the chain.
3627
3628 // If the port is a single-byte constant, use the immediate form.
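    // (The IN imm8 form can only encode port numbers 0-255; anything larger has
    // to go through DX, which is handled below.)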
3629 if (ConstantSDNode *Port = dyn_cast<ConstantSDNode>(Node->getOperand(1)))
3630 if ((Port->getValue() & 255) == Port->getValue()) {
3631 switch (Node->getValueType(0)) {
3632 case MVT::i8:
3633 BuildMI(BB, X86::IN8ri, 1).addImm(Port->getValue());
3634 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
3635 return Result;
3636 case MVT::i16:
3637 BuildMI(BB, X86::IN16ri, 1).addImm(Port->getValue());
3638 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
3639 return Result;
3640 case MVT::i32:
3641 BuildMI(BB, X86::IN32ri, 1).addImm(Port->getValue());
3642 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
3643 return Result;
3644 default: break;
3645 }
3646 }
3647
3648 // Now, move the I/O port address into the DX register and use the IN
3649 // instruction to get the input data.
3650 //
3651 Tmp1 = SelectExpr(Node->getOperand(1));
3652 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Tmp1);
3653 switch (Node->getValueType(0)) {
3654 case MVT::i8:
3655 BuildMI(BB, X86::IN8rr, 0);
3656 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
3657 return Result;
3658 case MVT::i16:
3659 BuildMI(BB, X86::IN16rr, 0);
3660 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
3661 return Result;
3662 case MVT::i32:
3663 BuildMI(BB, X86::IN32rr, 0);
3664 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
3665 return Result;
3666 default:
3667 std::cerr << "Cannot do input on this data type";
3668 exit(1);
3669 }
Jeff Cohen00b168892005-07-27 06:12:32 +00003670
Chris Lattner8acb1ba2005-01-07 07:49:41 +00003671 }
3672
3673 return 0;
3674}
3675
Chris Lattnere10269b2005-01-17 19:25:26 +00003676/// TryToFoldLoadOpStore - Given a store node, try to fold together a
3677/// load/op/store instruction. If successful return true.
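/// For example, a DAG of the form (store (add (load P), 4), P) can usually be
/// emitted as a single "addl $4, (P)"-style instruction.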
3678bool ISel::TryToFoldLoadOpStore(SDNode *Node) {
3679 assert(Node->getOpcode() == ISD::STORE && "Can only do this for stores!");
3680 SDOperand Chain = Node->getOperand(0);
3681 SDOperand StVal = Node->getOperand(1);
Chris Lattner5c659812005-01-17 22:10:42 +00003682 SDOperand StPtr = Node->getOperand(2);
Chris Lattnere10269b2005-01-17 19:25:26 +00003683
3684  // The chain has to be a load, and the stored value must be an integer
3685  // binary operation with one use.
Chris Lattner5c659812005-01-17 22:10:42 +00003686 if (!StVal.Val->hasOneUse() || StVal.Val->getNumOperands() != 2 ||
Chris Lattnere10269b2005-01-17 19:25:26 +00003687 MVT::isFloatingPoint(StVal.getValueType()))
3688 return false;
3689
Chris Lattner5c659812005-01-17 22:10:42 +00003690 // Token chain must either be a factor node or the load to fold.
3691 if (Chain.getOpcode() != ISD::LOAD && Chain.getOpcode() != ISD::TokenFactor)
3692 return false;
Chris Lattnere10269b2005-01-17 19:25:26 +00003693
Chris Lattner5c659812005-01-17 22:10:42 +00003694 SDOperand TheLoad;
3695
3696 // Check to see if there is a load from the same pointer that we're storing
3697 // to in either operand of the binop.
3698 if (StVal.getOperand(0).getOpcode() == ISD::LOAD &&
3699 StVal.getOperand(0).getOperand(1) == StPtr)
3700 TheLoad = StVal.getOperand(0);
3701 else if (StVal.getOperand(1).getOpcode() == ISD::LOAD &&
3702 StVal.getOperand(1).getOperand(1) == StPtr)
3703 TheLoad = StVal.getOperand(1);
3704 else
3705 return false; // No matching load operand.
3706
3707 // We can only fold the load if there are no intervening side-effecting
3708 // operations. This means that the store uses the load as its token chain, or
3709 // there are only token factor nodes in between the store and load.
3710 if (Chain != TheLoad.getValue(1)) {
3711 // Okay, the other option is that we have a store referring to (possibly
3712 // nested) token factor nodes. For now, just try peeking through one level
3713 // of token factors to see if this is the case.
3714 bool ChainOk = false;
3715 if (Chain.getOpcode() == ISD::TokenFactor) {
3716 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
3717 if (Chain.getOperand(i) == TheLoad.getValue(1)) {
3718 ChainOk = true;
3719 break;
3720 }
3721 }
3722
3723 if (!ChainOk) return false;
3724 }
3725
3726 if (TheLoad.getOperand(1) != StPtr)
Chris Lattnere10269b2005-01-17 19:25:26 +00003727 return false;
3728
3729 // Make sure that one of the operands of the binop is the load, and that the
3730 // load folds into the binop.
3731 if (((StVal.getOperand(0) != TheLoad ||
3732 !isFoldableLoad(TheLoad, StVal.getOperand(1))) &&
3733 (StVal.getOperand(1) != TheLoad ||
3734 !isFoldableLoad(TheLoad, StVal.getOperand(0)))))
3735 return false;
3736
3737 // Finally, check to see if this is one of the ops we can handle!
3738 static const unsigned ADDTAB[] = {
3739 X86::ADD8mi, X86::ADD16mi, X86::ADD32mi,
3740 X86::ADD8mr, X86::ADD16mr, X86::ADD32mr,
3741 };
3742 static const unsigned SUBTAB[] = {
3743 X86::SUB8mi, X86::SUB16mi, X86::SUB32mi,
3744 X86::SUB8mr, X86::SUB16mr, X86::SUB32mr,
3745 };
3746 static const unsigned ANDTAB[] = {
3747 X86::AND8mi, X86::AND16mi, X86::AND32mi,
3748 X86::AND8mr, X86::AND16mr, X86::AND32mr,
3749 };
3750 static const unsigned ORTAB[] = {
3751 X86::OR8mi, X86::OR16mi, X86::OR32mi,
3752 X86::OR8mr, X86::OR16mr, X86::OR32mr,
3753 };
3754 static const unsigned XORTAB[] = {
3755 X86::XOR8mi, X86::XOR16mi, X86::XOR32mi,
3756 X86::XOR8mr, X86::XOR16mr, X86::XOR32mr,
3757 };
3758 static const unsigned SHLTAB[] = {
3759 X86::SHL8mi, X86::SHL16mi, X86::SHL32mi,
3760 /*Have to put the reg in CL*/0, 0, 0,
3761 };
3762 static const unsigned SARTAB[] = {
3763 X86::SAR8mi, X86::SAR16mi, X86::SAR32mi,
3764 /*Have to put the reg in CL*/0, 0, 0,
3765 };
3766 static const unsigned SHRTAB[] = {
3767 X86::SHR8mi, X86::SHR16mi, X86::SHR32mi,
3768 /*Have to put the reg in CL*/0, 0, 0,
3769 };
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003770
Chris Lattnere10269b2005-01-17 19:25:26 +00003771 const unsigned *TabPtr = 0;
3772 switch (StVal.getOpcode()) {
3773 default:
3774 std::cerr << "CANNOT [mem] op= val: ";
3775 StVal.Val->dump(); std::cerr << "\n";
3776 case ISD::MUL:
3777 case ISD::SDIV:
3778 case ISD::UDIV:
3779 case ISD::SREM:
3780 case ISD::UREM: return false;
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003781
Chris Lattnere10269b2005-01-17 19:25:26 +00003782 case ISD::ADD: TabPtr = ADDTAB; break;
3783 case ISD::SUB: TabPtr = SUBTAB; break;
3784 case ISD::AND: TabPtr = ANDTAB; break;
3785 case ISD:: OR: TabPtr = ORTAB; break;
3786 case ISD::XOR: TabPtr = XORTAB; break;
3787 case ISD::SHL: TabPtr = SHLTAB; break;
3788 case ISD::SRA: TabPtr = SARTAB; break;
3789 case ISD::SRL: TabPtr = SHRTAB; break;
3790 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003791
Chris Lattnere10269b2005-01-17 19:25:26 +00003792 // Handle: [mem] op= CST
3793 SDOperand Op0 = StVal.getOperand(0);
3794 SDOperand Op1 = StVal.getOperand(1);
Chris Lattner0a078832005-01-23 23:20:06 +00003795 unsigned Opc = 0;
Chris Lattnere10269b2005-01-17 19:25:26 +00003796 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
3797 switch (Op0.getValueType()) { // Use Op0's type because of shifts.
3798 default: break;
3799 case MVT::i1:
3800 case MVT::i8: Opc = TabPtr[0]; break;
3801 case MVT::i16: Opc = TabPtr[1]; break;
3802 case MVT::i32: Opc = TabPtr[2]; break;
3803 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003804
Chris Lattnere10269b2005-01-17 19:25:26 +00003805 if (Opc) {
Chris Lattner4a108662005-01-18 03:51:59 +00003806 if (!ExprMap.insert(std::make_pair(TheLoad.getValue(1), 1)).second)
3807 assert(0 && "Already emitted?");
Chris Lattner5c659812005-01-17 22:10:42 +00003808 Select(Chain);
3809
Chris Lattnere10269b2005-01-17 19:25:26 +00003810 X86AddressMode AM;
3811 if (getRegPressure(TheLoad.getOperand(0)) >
3812 getRegPressure(TheLoad.getOperand(1))) {
3813 Select(TheLoad.getOperand(0));
3814 SelectAddress(TheLoad.getOperand(1), AM);
3815 } else {
3816 SelectAddress(TheLoad.getOperand(1), AM);
3817 Select(TheLoad.getOperand(0));
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003818 }
Chris Lattner5c659812005-01-17 22:10:42 +00003819
3820 if (StVal.getOpcode() == ISD::ADD) {
3821 if (CN->getValue() == 1) {
3822 switch (Op0.getValueType()) {
3823 default: break;
3824 case MVT::i8:
3825 addFullAddress(BuildMI(BB, X86::INC8m, 4), AM);
3826 return true;
3827 case MVT::i16: Opc = TabPtr[1];
3828 addFullAddress(BuildMI(BB, X86::INC16m, 4), AM);
3829 return true;
3830 case MVT::i32: Opc = TabPtr[2];
3831 addFullAddress(BuildMI(BB, X86::INC32m, 4), AM);
3832 return true;
3833 }
3834 } else if (CN->getValue()+1 == 0) { // [X] += -1 -> DEC [X]
3835 switch (Op0.getValueType()) {
3836 default: break;
3837 case MVT::i8:
3838 addFullAddress(BuildMI(BB, X86::DEC8m, 4), AM);
3839 return true;
3840 case MVT::i16: Opc = TabPtr[1];
3841 addFullAddress(BuildMI(BB, X86::DEC16m, 4), AM);
3842 return true;
3843 case MVT::i32: Opc = TabPtr[2];
3844 addFullAddress(BuildMI(BB, X86::DEC32m, 4), AM);
3845 return true;
3846 }
3847 }
3848 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003849
Chris Lattnere10269b2005-01-17 19:25:26 +00003850 addFullAddress(BuildMI(BB, Opc, 4+1),AM).addImm(CN->getValue());
3851 return true;
3852 }
3853 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003854
Chris Lattnere10269b2005-01-17 19:25:26 +00003855 // If we have [mem] = V op [mem], try to turn it into:
3856 // [mem] = [mem] op V.
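  // (Swapping the operands is only valid for commutative operations, which is
  // why SUB and the shift opcodes are excluded from the swap.)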
3857 if (Op1 == TheLoad && StVal.getOpcode() != ISD::SUB &&
3858 StVal.getOpcode() != ISD::SHL && StVal.getOpcode() != ISD::SRA &&
3859 StVal.getOpcode() != ISD::SRL)
3860 std::swap(Op0, Op1);
Misha Brukman0e0a7a452005-04-21 23:38:14 +00003861
Chris Lattnere10269b2005-01-17 19:25:26 +00003862 if (Op0 != TheLoad) return false;
3863
3864 switch (Op0.getValueType()) {
3865 default: return false;
3866 case MVT::i1:
3867 case MVT::i8: Opc = TabPtr[3]; break;
3868 case MVT::i16: Opc = TabPtr[4]; break;
3869 case MVT::i32: Opc = TabPtr[5]; break;
3870 }
Chris Lattner5c659812005-01-17 22:10:42 +00003871
Chris Lattnerb422aea2005-01-18 17:35:28 +00003872 // Table entry doesn't exist?
3873 if (Opc == 0) return false;
3874
Chris Lattner4a108662005-01-18 03:51:59 +00003875 if (!ExprMap.insert(std::make_pair(TheLoad.getValue(1), 1)).second)
3876 assert(0 && "Already emitted?");
Chris Lattner5c659812005-01-17 22:10:42 +00003877 Select(Chain);
Chris Lattnere10269b2005-01-17 19:25:26 +00003878 Select(TheLoad.getOperand(0));
Chris Lattner98a8ba02005-01-18 01:06:26 +00003879
Chris Lattnere10269b2005-01-17 19:25:26 +00003880 X86AddressMode AM;
3881 SelectAddress(TheLoad.getOperand(1), AM);
3882 unsigned Reg = SelectExpr(Op1);
Chris Lattner98a8ba02005-01-18 01:06:26 +00003883 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Reg);
Chris Lattnere10269b2005-01-17 19:25:26 +00003884 return true;
3885}
3886
Chris Lattner381e8872005-05-15 05:46:45 +00003887/// If node is a ret(tailcall) node, emit the specified tail call and return
3888/// true, otherwise return false.
3889///
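/// The dag shapes recognized here are, roughly:
///   ret (X86ISD::TAILCALL ...)
///   ret (callseq_end (X86ISD::TAILCALL ...))
///   ret (tokenfactor ..., callseq_end (X86ISD::TAILCALL ...))
/// optionally returning the value(s) produced by the call.
///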
3890/// FIXME: This whole thing should be a post-legalize optimization pass which
3891/// recognizes and transforms the dag. We don't want the selection phase doing
3892/// this stuff!!
3893///
3894bool ISel::EmitPotentialTailCall(SDNode *RetNode) {
3895 assert(RetNode->getOpcode() == ISD::RET && "Not a return");
3896
3897 SDOperand Chain = RetNode->getOperand(0);
3898
3899 // If this is a token factor node where one operand is a call, dig into it.
3900 SDOperand TokFactor;
3901 unsigned TokFactorOperand = 0;
3902 if (Chain.getOpcode() == ISD::TokenFactor) {
3903 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
3904 if (Chain.getOperand(i).getOpcode() == ISD::CALLSEQ_END ||
3905 Chain.getOperand(i).getOpcode() == X86ISD::TAILCALL) {
3906 TokFactorOperand = i;
3907 TokFactor = Chain;
3908 Chain = Chain.getOperand(i);
3909 break;
3910 }
3911 if (TokFactor.Val == 0) return false; // No call operand.
3912 }
3913
3914 // Skip the CALLSEQ_END node if present.
3915 if (Chain.getOpcode() == ISD::CALLSEQ_END)
3916 Chain = Chain.getOperand(0);
3917
3918 // Is a tailcall the last control operation that occurs before the return?
3919 if (Chain.getOpcode() != X86ISD::TAILCALL)
3920 return false;
3921
3922 // If we return a value, is it the value produced by the call?
3923 if (RetNode->getNumOperands() > 1) {
3924 // Not returning the ret val of the call?
3925 if (Chain.Val->getNumValues() == 1 ||
3926 RetNode->getOperand(1) != Chain.getValue(1))
3927 return false;
3928
3929 if (RetNode->getNumOperands() > 2) {
3930 if (Chain.Val->getNumValues() == 2 ||
3931 RetNode->getOperand(2) != Chain.getValue(2))
3932 return false;
3933 }
3934 assert(RetNode->getNumOperands() <= 3);
3935 }
3936
3937 // CalleeCallArgAmt - The total number of bytes used for the callee arg area.
3938 // For FastCC, this will always be > 0.
3939 unsigned CalleeCallArgAmt =
3940 cast<ConstantSDNode>(Chain.getOperand(2))->getValue();
3941
3942 // CalleeCallArgPopAmt - The number of bytes in the call area popped by the
3943 // callee. For FastCC this will always be > 0, for CCC this is always 0.
3944 unsigned CalleeCallArgPopAmt =
3945 cast<ConstantSDNode>(Chain.getOperand(3))->getValue();
3946
3947 // There are several cases we can handle here. First, if the caller and
3948 // callee are both CCC functions, we can tailcall if the callee takes <= the
3949 // number of argument bytes that the caller does.
3950 if (CalleeCallArgPopAmt == 0 && // Callee is C CallingConv?
3951 X86Lowering.getBytesToPopOnReturn() == 0) { // Caller is C CallingConv?
3952 // Check to see if caller arg area size >= callee arg area size.
3953 if (X86Lowering.getBytesCallerReserves() >= CalleeCallArgAmt) {
3954 //std::cerr << "CCC TAILCALL UNIMP!\n";
3955 // If TokFactor is non-null, emit all operands.
3956
3957 //EmitCCCToCCCTailCall(Chain.Val);
3958 //return true;
3959 }
3960 return false;
3961 }
3962
3963 // Second, if both are FastCC functions, we can always perform the tail call.
3964 if (CalleeCallArgPopAmt && X86Lowering.getBytesToPopOnReturn()) {
3965 // If TokFactor is non-null, emit all operands before the call.
3966 if (TokFactor.Val) {
3967 for (unsigned i = 0, e = TokFactor.getNumOperands(); i != e; ++i)
3968 if (i != TokFactorOperand)
3969 Select(TokFactor.getOperand(i));
3970 }
3971
3972 EmitFastCCToFastCCTailCall(Chain.Val);
3973 return true;
3974 }
3975
3976 // We don't support mixed calls, due to issues with alignment. We could in
3977 // theory handle some mixed calls from CCC -> FastCC if the stack is properly
3978 // aligned (which depends on the number of arguments to the callee). TODO.
3979 return false;
3980}
3981
3982static SDOperand GetAdjustedArgumentStores(SDOperand Chain, int Offset,
3983 SelectionDAG &DAG) {
3984 MVT::ValueType StoreVT;
3985 switch (Chain.getOpcode()) {
3986 case ISD::CALLSEQ_START:
Chris Lattnerea035432005-05-15 06:07:10 +00003987 // If we found the start of the call sequence, we're done. We actually
3988 // strip off the CALLSEQ_START node, to avoid generating the
3989 // ADJCALLSTACKDOWN marker for the tail call.
3990 return Chain.getOperand(0);
Chris Lattner381e8872005-05-15 05:46:45 +00003991 case ISD::TokenFactor: {
3992 std::vector<SDOperand> Ops;
3993 Ops.reserve(Chain.getNumOperands());
3994 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
3995 Ops.push_back(GetAdjustedArgumentStores(Chain.getOperand(i), Offset,DAG));
3996 return DAG.getNode(ISD::TokenFactor, MVT::Other, Ops);
3997 }
3998 case ISD::STORE: // Normal store
3999 StoreVT = Chain.getOperand(1).getValueType();
4000 break;
4001 case ISD::TRUNCSTORE: // FLOAT store
Chris Lattner9fadb4c2005-07-10 00:29:18 +00004002 StoreVT = cast<VTSDNode>(Chain.getOperand(4))->getVT();
Chris Lattner381e8872005-05-15 05:46:45 +00004003 break;
4004 }
4005
4006 SDOperand OrigDest = Chain.getOperand(2);
4007 unsigned OrigOffset;
4008
4009 if (OrigDest.getOpcode() == ISD::CopyFromReg) {
4010 OrigOffset = 0;
4011 assert(cast<RegSDNode>(OrigDest)->getReg() == X86::ESP);
4012 } else {
4013 // We expect only (ESP+C)
4014 assert(OrigDest.getOpcode() == ISD::ADD &&
4015 isa<ConstantSDNode>(OrigDest.getOperand(1)) &&
4016 OrigDest.getOperand(0).getOpcode() == ISD::CopyFromReg &&
4017 cast<RegSDNode>(OrigDest.getOperand(0))->getReg() == X86::ESP);
4018 OrigOffset = cast<ConstantSDNode>(OrigDest.getOperand(1))->getValue();
4019 }
4020
4021 // Compute the new offset from the incoming ESP value we wish to use.
4022 unsigned NewOffset = OrigOffset + Offset;
4023
4024 unsigned OpSize = (MVT::getSizeInBits(StoreVT)+7)/8; // Bits -> Bytes
4025 MachineFunction &MF = DAG.getMachineFunction();
4026 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, NewOffset);
4027 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
4028
4029 SDOperand InChain = GetAdjustedArgumentStores(Chain.getOperand(0), Offset,
4030 DAG);
4031 if (Chain.getOpcode() == ISD::STORE)
4032 return DAG.getNode(ISD::STORE, MVT::Other, InChain, Chain.getOperand(1),
4033 FIN);
4034 assert(Chain.getOpcode() == ISD::TRUNCSTORE);
4035 return DAG.getNode(ISD::TRUNCSTORE, MVT::Other, InChain, Chain.getOperand(1),
Chris Lattner9fadb4c2005-07-10 00:29:18 +00004036 FIN, DAG.getSrcValue(NULL), DAG.getValueType(StoreVT));
Chris Lattner381e8872005-05-15 05:46:45 +00004037}
4038
4039
4040/// EmitFastCCToFastCCTailCall - Given a tailcall in the tail position to a
4041/// fastcc function from a fastcc function, emit the code for a 'proper'
4042/// tail call.
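/// In outline, the emitted sequence is: rewrite the outgoing argument stores
/// to be relative to the incoming ESP, relocate the return address when the
/// incoming and outgoing argument areas differ in size, move any register
/// arguments into EAX/EDX, adjust ESP, and finish with a TAILJMP.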
4043void ISel::EmitFastCCToFastCCTailCall(SDNode *TailCallNode) {
4044 unsigned CalleeCallArgSize =
4045 cast<ConstantSDNode>(TailCallNode->getOperand(2))->getValue();
4046 unsigned CallerArgSize = X86Lowering.getBytesToPopOnReturn();
4047
4048 //std::cerr << "****\n*** EMITTING TAIL CALL!\n****\n";
4049
4050  // Adjust argument stores.  Instead of storing to, e.g., [ESP], store to frame
4051 // indexes that are relative to the incoming ESP. If the incoming and
4052 // outgoing arg sizes are the same we will store to [InESP] instead of
4053 // [CurESP] and the ESP referenced will be relative to the incoming function
4054 // ESP.
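  // For example (illustrative numbers): if the caller was entered with 12
  // bytes of arguments and the callee needs 8, ESPOffset is 4, so an outgoing
  // argument that would have gone to [ESP+0] is instead stored to a fixed
  // frame object at offset 0+4 = 4 from the incoming ESP.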
4055 int ESPOffset = CallerArgSize-CalleeCallArgSize;
4056 SDOperand AdjustedArgStores =
4057 GetAdjustedArgumentStores(TailCallNode->getOperand(0), ESPOffset, *TheDAG);
4058
4059 // Copy the return address of the caller into a virtual register so we don't
4060 // clobber it.
4061 SDOperand RetVal;
4062 if (ESPOffset) {
4063 SDOperand RetValAddr = X86Lowering.getReturnAddressFrameIndex(*TheDAG);
4064 RetVal = TheDAG->getLoad(MVT::i32, TheDAG->getEntryNode(),
4065 RetValAddr, TheDAG->getSrcValue(NULL));
4066 SelectExpr(RetVal);
4067 }
4068
4069 // Codegen all of the argument stores.
4070 Select(AdjustedArgStores);
4071
4072 if (RetVal.Val) {
4073 // Emit a store of the saved ret value to the new location.
4074 MachineFunction &MF = TheDAG->getMachineFunction();
4075 int ReturnAddrFI = MF.getFrameInfo()->CreateFixedObject(4, ESPOffset-4);
4076 SDOperand RetValAddr = TheDAG->getFrameIndex(ReturnAddrFI, MVT::i32);
4077 Select(TheDAG->getNode(ISD::STORE, MVT::Other, TheDAG->getEntryNode(),
4078 RetVal, RetValAddr));
4079 }
4080
4081 // Get the destination value.
4082 SDOperand Callee = TailCallNode->getOperand(1);
4083 bool isDirect = isa<GlobalAddressSDNode>(Callee) ||
4084 isa<ExternalSymbolSDNode>(Callee);
Chris Lattner9cb2d612005-06-17 13:23:32 +00004085 unsigned CalleeReg = 0;
Chris Lattner381e8872005-05-15 05:46:45 +00004086 if (!isDirect) CalleeReg = SelectExpr(Callee);
4087
4088 unsigned RegOp1 = 0;
4089 unsigned RegOp2 = 0;
4090
4091 if (TailCallNode->getNumOperands() > 4) {
4092 // The first value is passed in (a part of) EAX, the second in EDX.
4093 RegOp1 = SelectExpr(TailCallNode->getOperand(4));
4094 if (TailCallNode->getNumOperands() > 5)
4095 RegOp2 = SelectExpr(TailCallNode->getOperand(5));
Jeff Cohen00b168892005-07-27 06:12:32 +00004096
Chris Lattner381e8872005-05-15 05:46:45 +00004097 switch (TailCallNode->getOperand(4).getValueType()) {
4098 default: assert(0 && "Bad thing to pass in regs");
4099 case MVT::i1:
4100 case MVT::i8:
4101 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(RegOp1);
4102 RegOp1 = X86::AL;
4103 break;
4104 case MVT::i16:
4105 BuildMI(BB, X86::MOV16rr, 1,X86::AX).addReg(RegOp1);
4106 RegOp1 = X86::AX;
4107 break;
4108 case MVT::i32:
4109 BuildMI(BB, X86::MOV32rr, 1,X86::EAX).addReg(RegOp1);
4110 RegOp1 = X86::EAX;
4111 break;
4112 }
4113 if (RegOp2)
4114 switch (TailCallNode->getOperand(5).getValueType()) {
4115 default: assert(0 && "Bad thing to pass in regs");
4116 case MVT::i1:
4117 case MVT::i8:
4118 BuildMI(BB, X86::MOV8rr, 1, X86::DL).addReg(RegOp2);
4119 RegOp2 = X86::DL;
4120 break;
4121 case MVT::i16:
4122 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(RegOp2);
4123 RegOp2 = X86::DX;
4124 break;
4125 case MVT::i32:
4126 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RegOp2);
4127 RegOp2 = X86::EDX;
4128 break;
4129 }
4130 }
4131
4132 // Adjust ESP.
4133 if (ESPOffset)
4134 BuildMI(BB, X86::ADJSTACKPTRri, 2,
4135 X86::ESP).addReg(X86::ESP).addImm(ESPOffset);
4136
4137 // TODO: handle jmp [mem]
4138 if (!isDirect) {
4139 BuildMI(BB, X86::TAILJMPr, 1).addReg(CalleeReg);
4140 } else if (GlobalAddressSDNode *GASD = dyn_cast<GlobalAddressSDNode>(Callee)){
Chris Lattner16cb6f82005-05-19 05:54:33 +00004141 BuildMI(BB, X86::TAILJMPd, 1).addGlobalAddress(GASD->getGlobal(), true);
Chris Lattner381e8872005-05-15 05:46:45 +00004142 } else {
4143 ExternalSymbolSDNode *ESSDN = cast<ExternalSymbolSDNode>(Callee);
4144 BuildMI(BB, X86::TAILJMPd, 1).addExternalSymbol(ESSDN->getSymbol(), true);
4145 }
4146 // ADD IMPLICIT USE RegOp1/RegOp2's
4147}
4148
Chris Lattnere10269b2005-01-17 19:25:26 +00004149
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004150void ISel::Select(SDOperand N) {
4151 unsigned Tmp1, Tmp2, Opc;
4152
Nate Begeman85fdeb22005-03-24 04:39:54 +00004153 if (!ExprMap.insert(std::make_pair(N, 1)).second)
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004154 return; // Already selected.
4155
Chris Lattner989de032005-01-11 06:14:36 +00004156 SDNode *Node = N.Val;
4157
4158 switch (Node->getOpcode()) {
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004159 default:
Chris Lattner989de032005-01-11 06:14:36 +00004160 Node->dump(); std::cerr << "\n";
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004161 assert(0 && "Node not handled yet!");
4162 case ISD::EntryToken: return; // Noop
Chris Lattnerc3580712005-01-13 18:01:36 +00004163 case ISD::TokenFactor:
Chris Lattner1d50b7f2005-01-13 19:56:00 +00004164 if (Node->getNumOperands() == 2) {
Misha Brukman0e0a7a452005-04-21 23:38:14 +00004165 bool OneFirst =
Chris Lattner1d50b7f2005-01-13 19:56:00 +00004166 getRegPressure(Node->getOperand(1))>getRegPressure(Node->getOperand(0));
4167 Select(Node->getOperand(OneFirst));
4168 Select(Node->getOperand(!OneFirst));
4169 } else {
4170 std::vector<std::pair<unsigned, unsigned> > OpsP;
4171 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
4172 OpsP.push_back(std::make_pair(getRegPressure(Node->getOperand(i)), i));
4173 std::sort(OpsP.begin(), OpsP.end());
4174 std::reverse(OpsP.begin(), OpsP.end());
4175 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
4176 Select(Node->getOperand(OpsP[i].second));
4177 }
Chris Lattnerc3580712005-01-13 18:01:36 +00004178 return;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004179 case ISD::CopyToReg:
Chris Lattneref6806c2005-01-12 02:02:48 +00004180 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
4181 Select(N.getOperand(0));
4182 Tmp1 = SelectExpr(N.getOperand(1));
4183 } else {
4184 Tmp1 = SelectExpr(N.getOperand(1));
4185 Select(N.getOperand(0));
4186 }
Chris Lattner18c2f132005-01-13 20:50:02 +00004187 Tmp2 = cast<RegSDNode>(N)->getReg();
Misha Brukman0e0a7a452005-04-21 23:38:14 +00004188
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004189 if (Tmp1 != Tmp2) {
4190 switch (N.getOperand(1).getValueType()) {
4191 default: assert(0 && "Invalid type for operation!");
4192 case MVT::i1:
4193 case MVT::i8: Opc = X86::MOV8rr; break;
4194 case MVT::i16: Opc = X86::MOV16rr; break;
4195 case MVT::i32: Opc = X86::MOV32rr; break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00004196 case MVT::f32: Opc = X86::MOVAPSrr; break;
Jeff Cohen00b168892005-07-27 06:12:32 +00004197 case MVT::f64:
Nate Begemanf63be7d2005-07-06 18:59:04 +00004198 if (X86ScalarSSE) {
4199 Opc = X86::MOVAPDrr;
4200 } else {
Jeff Cohen00b168892005-07-27 06:12:32 +00004201 Opc = X86::FpMOV;
4202 ContainsFPCode = true;
Nate Begemanf63be7d2005-07-06 18:59:04 +00004203 }
4204 break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004205 }
4206 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
4207 }
4208 return;
4209 case ISD::RET:
Chris Lattner381e8872005-05-15 05:46:45 +00004210 if (N.getOperand(0).getOpcode() == ISD::CALLSEQ_END ||
4211 N.getOperand(0).getOpcode() == X86ISD::TAILCALL ||
4212 N.getOperand(0).getOpcode() == ISD::TokenFactor)
4213 if (EmitPotentialTailCall(Node))
4214 return;
4215
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004216 switch (N.getNumOperands()) {
4217 default:
4218 assert(0 && "Unknown return instruction!");
4219 case 3:
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004220 assert(N.getOperand(1).getValueType() == MVT::i32 &&
Jeff Cohen00b168892005-07-27 06:12:32 +00004221 N.getOperand(2).getValueType() == MVT::i32 &&
4222 "Unknown two-register value!");
Chris Lattner11333092005-01-11 03:11:44 +00004223 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
4224 Tmp1 = SelectExpr(N.getOperand(1));
4225 Tmp2 = SelectExpr(N.getOperand(2));
4226 } else {
4227 Tmp2 = SelectExpr(N.getOperand(2));
4228 Tmp1 = SelectExpr(N.getOperand(1));
4229 }
4230 Select(N.getOperand(0));
4231
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004232 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4233 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(Tmp2);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004234 break;
4235 case 2:
Chris Lattner11333092005-01-11 03:11:44 +00004236 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
4237 Select(N.getOperand(0));
4238 Tmp1 = SelectExpr(N.getOperand(1));
4239 } else {
4240 Tmp1 = SelectExpr(N.getOperand(1));
4241 Select(N.getOperand(0));
4242 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004243 switch (N.getOperand(1).getValueType()) {
4244 default: assert(0 && "All other types should have been promoted!!");
Nate Begemanf63be7d2005-07-06 18:59:04 +00004245 case MVT::f32:
4246 if (X86ScalarSSE) {
4247 // Spill the value to memory and reload it into top of stack.
4248 unsigned Size = MVT::getSizeInBits(MVT::f32)/8;
4249 MachineFunction *F = BB->getParent();
4250 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
4251 addFrameReference(BuildMI(BB, X86::MOVSSmr, 5), FrameIdx).addReg(Tmp1);
4252 addFrameReference(BuildMI(BB, X86::FLD32m, 4, X86::FP0), FrameIdx);
4253 BuildMI(BB, X86::FpSETRESULT, 1).addReg(X86::FP0);
Jeff Cohen00b168892005-07-27 06:12:32 +00004254 ContainsFPCode = true;
Nate Begemanf63be7d2005-07-06 18:59:04 +00004255 } else {
4256 assert(0 && "MVT::f32 only legal with scalar sse fp");
4257 abort();
4258 }
4259 break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004260 case MVT::f64:
Nate Begemanf63be7d2005-07-06 18:59:04 +00004261 if (X86ScalarSSE) {
4262 // Spill the value to memory and reload it into top of stack.
4263 unsigned Size = MVT::getSizeInBits(MVT::f64)/8;
4264 MachineFunction *F = BB->getParent();
4265 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
4266 addFrameReference(BuildMI(BB, X86::MOVSDmr, 5), FrameIdx).addReg(Tmp1);
4267 addFrameReference(BuildMI(BB, X86::FLD64m, 4, X86::FP0), FrameIdx);
4268 BuildMI(BB, X86::FpSETRESULT, 1).addReg(X86::FP0);
Jeff Cohen00b168892005-07-27 06:12:32 +00004269 ContainsFPCode = true;
Nate Begemanf63be7d2005-07-06 18:59:04 +00004270 } else {
4271 BuildMI(BB, X86::FpSETRESULT, 1).addReg(Tmp1);
4272 }
4273 break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004274 case MVT::i32:
Nate Begemanf63be7d2005-07-06 18:59:04 +00004275 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4276 break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004277 }
4278 break;
4279 case 1:
Chris Lattner11333092005-01-11 03:11:44 +00004280 Select(N.getOperand(0));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004281 break;
4282 }
Chris Lattner3648c672005-05-13 21:44:04 +00004283 if (X86Lowering.getBytesToPopOnReturn() == 0)
4284 BuildMI(BB, X86::RET, 0); // Just emit a 'ret' instruction
4285 else
4286 BuildMI(BB, X86::RETI, 1).addImm(X86Lowering.getBytesToPopOnReturn());
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004287 return;
4288 case ISD::BR: {
4289 Select(N.getOperand(0));
4290 MachineBasicBlock *Dest =
4291 cast<BasicBlockSDNode>(N.getOperand(1))->getBasicBlock();
4292 BuildMI(BB, X86::JMP, 1).addMBB(Dest);
4293 return;
4294 }
4295
4296 case ISD::BRCOND: {
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004297 MachineBasicBlock *Dest =
4298 cast<BasicBlockSDNode>(N.getOperand(2))->getBasicBlock();
Chris Lattner11333092005-01-11 03:11:44 +00004299
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004300 // Try to fold a setcc into the branch. If this fails, emit a test/jne
4301 // pair.
Chris Lattner6c07aee2005-01-11 04:06:27 +00004302 if (EmitBranchCC(Dest, N.getOperand(0), N.getOperand(1))) {
4303 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
4304 Select(N.getOperand(0));
4305 Tmp1 = SelectExpr(N.getOperand(1));
4306 } else {
4307 Tmp1 = SelectExpr(N.getOperand(1));
4308 Select(N.getOperand(0));
4309 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004310 BuildMI(BB, X86::TEST8rr, 2).addReg(Tmp1).addReg(Tmp1);
4311 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
4312 }
Chris Lattner11333092005-01-11 03:11:44 +00004313
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004314 return;
4315 }
Chris Lattnere9ef81d2005-01-15 05:22:24 +00004316
Chris Lattner4df0de92005-01-17 00:00:33 +00004317 case ISD::LOAD:
4318 // If this load could be folded into the only using instruction, and if it
4319 // is safe to emit the instruction here, try to do so now.
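    // (For instance, a load whose only use is a single unary operator can
    // often be matched as one instruction with a memory operand by selecting
    // the user here instead of the load itself.)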
4320 if (Node->hasNUsesOfValue(1, 0)) {
4321 SDOperand TheVal = N.getValue(0);
4322 SDNode *User = 0;
4323 for (SDNode::use_iterator UI = Node->use_begin(); ; ++UI) {
4324 assert(UI != Node->use_end() && "Didn't find use!");
4325 SDNode *UN = *UI;
4326 for (unsigned i = 0, e = UN->getNumOperands(); i != e; ++i)
4327 if (UN->getOperand(i) == TheVal) {
4328 User = UN;
4329 goto FoundIt;
4330 }
4331 }
4332 FoundIt:
4333 // Only handle unary operators right now.
4334 if (User->getNumOperands() == 1) {
Chris Lattner4a108662005-01-18 03:51:59 +00004335 ExprMap.erase(N);
Chris Lattner4df0de92005-01-17 00:00:33 +00004336 SelectExpr(SDOperand(User, 0));
4337 return;
4338 }
4339 }
Chris Lattnerb71f8fc2005-01-18 04:00:54 +00004340 ExprMap.erase(N);
Chris Lattner4df0de92005-01-17 00:00:33 +00004341 SelectExpr(N);
4342 return;
Chris Lattner966cdfb2005-05-09 21:17:38 +00004343 case ISD::READPORT:
Chris Lattnere9ef81d2005-01-15 05:22:24 +00004344 case ISD::EXTLOAD:
4345 case ISD::SEXTLOAD:
4346 case ISD::ZEXTLOAD:
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004347 case ISD::DYNAMIC_STACKALLOC:
Chris Lattner239738a2005-05-14 08:48:15 +00004348 case X86ISD::TAILCALL:
4349 case X86ISD::CALL:
Chris Lattnerb71f8fc2005-01-18 04:00:54 +00004350 ExprMap.erase(N);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004351 SelectExpr(N);
4352 return;
Chris Lattnerc6f41812005-05-12 23:06:28 +00004353 case ISD::CopyFromReg:
Chris Lattner67649df2005-05-14 06:52:07 +00004354 case X86ISD::FILD64m:
Chris Lattnerc6f41812005-05-12 23:06:28 +00004355 ExprMap.erase(N);
4356 SelectExpr(N.getValue(0));
4357 return;
Chris Lattner745d5382005-07-29 00:40:01 +00004358
4359 case X86ISD::FISTP64m: {
4360 assert(N.getOperand(1).getValueType() == MVT::f64);
4361 X86AddressMode AM;
4362 Select(N.getOperand(0)); // Select the token chain
4363
4364 unsigned ValReg;
4365 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
4366 ValReg = SelectExpr(N.getOperand(1));
4367 SelectAddress(N.getOperand(2), AM);
4368 } else {
4369 SelectAddress(N.getOperand(2), AM);
4370 ValReg = SelectExpr(N.getOperand(1));
4371 }
4372 addFullAddress(BuildMI(BB, X86::FISTP64m, 5), AM).addReg(ValReg);
4373 return;
4374 }
Chris Lattnere9ef81d2005-01-15 05:22:24 +00004375
Chris Lattner9fadb4c2005-07-10 00:29:18 +00004376 case ISD::TRUNCSTORE: { // truncstore chain, val, ptr, SRCVALUE, storety
Chris Lattnere9ef81d2005-01-15 05:22:24 +00004377 X86AddressMode AM;
Chris Lattner9fadb4c2005-07-10 00:29:18 +00004378 MVT::ValueType StoredTy = cast<VTSDNode>(N.getOperand(4))->getVT();
Chris Lattnerda2ce112005-01-16 07:34:08 +00004379 assert((StoredTy == MVT::i1 || StoredTy == MVT::f32 ||
4380 StoredTy == MVT::i16 /*FIXME: THIS IS JUST FOR TESTING!*/)
4381 && "Unsupported TRUNCSTORE for this target!");
4382
4383 if (StoredTy == MVT::i16) {
4384 // FIXME: This is here just to allow testing. X86 doesn't really have a
4385 // TRUNCSTORE i16 operation, but this is required for targets that do not
4386 // have 16-bit integer registers. We occasionally disable 16-bit integer
4387 // registers to test the promotion code.
4388 Select(N.getOperand(0));
4389 Tmp1 = SelectExpr(N.getOperand(1));
4390 SelectAddress(N.getOperand(2), AM);
4391
4392 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4393 addFullAddress(BuildMI(BB, X86::MOV16mr, 5), AM).addReg(X86::AX);
4394 return;
4395 }
Chris Lattnere9ef81d2005-01-15 05:22:24 +00004396
4397 // Store of constant bool?
4398 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
4399 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
4400 Select(N.getOperand(0));
4401 SelectAddress(N.getOperand(2), AM);
4402 } else {
4403 SelectAddress(N.getOperand(2), AM);
4404 Select(N.getOperand(0));
4405 }
4406 addFullAddress(BuildMI(BB, X86::MOV8mi, 5), AM).addImm(CN->getValue());
4407 return;
4408 }
4409
4410 switch (StoredTy) {
4411 default: assert(0 && "Cannot truncstore this type!");
4412 case MVT::i1: Opc = X86::MOV8mr; break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00004413 case MVT::f32:
Jeff Cohen00b168892005-07-27 06:12:32 +00004414 assert(!X86ScalarSSE && "Cannot truncstore scalar SSE regs");
Nate Begemanf63be7d2005-07-06 18:59:04 +00004415 Opc = X86::FST32m; break;
Chris Lattnere9ef81d2005-01-15 05:22:24 +00004416 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00004417
Chris Lattnere9ef81d2005-01-15 05:22:24 +00004418 std::vector<std::pair<unsigned, unsigned> > RP;
4419 RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
4420 RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
4421 RP.push_back(std::make_pair(getRegPressure(N.getOperand(2)), 2));
4422 std::sort(RP.begin(), RP.end());
4423
Chris Lattner572dd082005-02-23 05:57:21 +00004424 Tmp1 = 0; // Silence a warning.
Chris Lattnere9ef81d2005-01-15 05:22:24 +00004425 for (unsigned i = 0; i != 3; ++i)
4426 switch (RP[2-i].second) {
4427 default: assert(0 && "Unknown operand number!");
4428 case 0: Select(N.getOperand(0)); break;
4429 case 1: Tmp1 = SelectExpr(N.getOperand(1)); break;
4430 case 2: SelectAddress(N.getOperand(2), AM); break;
4431 }
4432
4433 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1);
4434 return;
4435 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004436 case ISD::STORE: {
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004437 X86AddressMode AM;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004438
4439 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
4440 Opc = 0;
4441 switch (CN->getValueType(0)) {
4442 default: assert(0 && "Invalid type for operation!");
4443 case MVT::i1:
4444 case MVT::i8: Opc = X86::MOV8mi; break;
4445 case MVT::i16: Opc = X86::MOV16mi; break;
4446 case MVT::i32: Opc = X86::MOV32mi; break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004447 }
4448 if (Opc) {
Chris Lattner11333092005-01-11 03:11:44 +00004449 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
4450 Select(N.getOperand(0));
4451 SelectAddress(N.getOperand(2), AM);
4452 } else {
4453 SelectAddress(N.getOperand(2), AM);
4454 Select(N.getOperand(0));
4455 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004456 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addImm(CN->getValue());
4457 return;
4458 }
Chris Lattner75f354b2005-04-21 19:03:24 +00004459 } else if (GlobalAddressSDNode *GA =
4460 dyn_cast<GlobalAddressSDNode>(N.getOperand(1))) {
4461 assert(GA->getValueType(0) == MVT::i32 && "Bad pointer operand");
4462
4463 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
4464 Select(N.getOperand(0));
4465 SelectAddress(N.getOperand(2), AM);
4466 } else {
4467 SelectAddress(N.getOperand(2), AM);
4468 Select(N.getOperand(0));
4469 }
Nate Begeman16b04f32005-07-15 00:38:55 +00004470 GlobalValue *GV = GA->getGlobal();
4471 // For Darwin, external and weak symbols are indirect, so we want to load
4472 // the value at address GV, not the value of GV itself.
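      // In that case the store takes two instructions: a MOV32rm that loads
      // the global's real address through the indirect pointer, then a
      // MOV32mr of that register; otherwise a single MOV32mi stores the
      // address directly.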
Jeff Cohen00b168892005-07-27 06:12:32 +00004473 if (Subtarget->getIndirectExternAndWeakGlobals() &&
Nate Begeman16b04f32005-07-15 00:38:55 +00004474 (GV->hasWeakLinkage() || GV->isExternal())) {
4475 Tmp1 = MakeReg(MVT::i32);
4476 BuildMI(BB, X86::MOV32rm, 4, Tmp1).addReg(0).addZImm(1).addReg(0)
4477 .addGlobalAddress(GV, false, 0);
4478 addFullAddress(BuildMI(BB, X86::MOV32mr, 4+1),AM).addReg(Tmp1);
4479 } else {
4480 addFullAddress(BuildMI(BB, X86::MOV32mi, 4+1),AM).addGlobalAddress(GV);
4481 }
Chris Lattner75f354b2005-04-21 19:03:24 +00004482 return;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004483 }
Chris Lattner837caa72005-01-11 23:21:30 +00004484
4485 // Check to see if this is a load/op/store combination.
Chris Lattnere10269b2005-01-17 19:25:26 +00004486 if (TryToFoldLoadOpStore(Node))
4487 return;
Chris Lattner837caa72005-01-11 23:21:30 +00004488
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004489 switch (N.getOperand(1).getValueType()) {
4490 default: assert(0 && "Cannot store this type!");
4491 case MVT::i1:
4492 case MVT::i8: Opc = X86::MOV8mr; break;
4493 case MVT::i16: Opc = X86::MOV16mr; break;
4494 case MVT::i32: Opc = X86::MOV32mr; break;
Nate Begemanf63be7d2005-07-06 18:59:04 +00004495 case MVT::f32: Opc = X86::MOVSSmr; break;
4496 case MVT::f64: Opc = X86ScalarSSE ? X86::MOVSDmr : X86::FST64m; break;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004497 }
Misha Brukman0e0a7a452005-04-21 23:38:14 +00004498
Chris Lattner11333092005-01-11 03:11:44 +00004499 std::vector<std::pair<unsigned, unsigned> > RP;
4500 RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
4501 RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
4502 RP.push_back(std::make_pair(getRegPressure(N.getOperand(2)), 2));
4503 std::sort(RP.begin(), RP.end());
4504
Chris Lattner572dd082005-02-23 05:57:21 +00004505 Tmp1 = 0; // Silence a warning.
Chris Lattner11333092005-01-11 03:11:44 +00004506 for (unsigned i = 0; i != 3; ++i)
4507 switch (RP[2-i].second) {
4508 default: assert(0 && "Unknown operand number!");
4509 case 0: Select(N.getOperand(0)); break;
4510 case 1: Tmp1 = SelectExpr(N.getOperand(1)); break;
Chris Lattnera3aa2e22005-01-11 03:37:59 +00004511 case 2: SelectAddress(N.getOperand(2), AM); break;
Chris Lattner11333092005-01-11 03:11:44 +00004512 }
4513
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004514 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1);
4515 return;
4516 }
Chris Lattner16cd04d2005-05-12 23:24:06 +00004517 case ISD::CALLSEQ_START:
Chris Lattner3648c672005-05-13 21:44:04 +00004518 Select(N.getOperand(0));
4519 // Stack amount
4520 Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
4521 BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(Tmp1);
4522 return;
Chris Lattner16cd04d2005-05-12 23:24:06 +00004523 case ISD::CALLSEQ_END:
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004524 Select(N.getOperand(0));
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004525 return;
Chris Lattner989de032005-01-11 06:14:36 +00004526 case ISD::MEMSET: {
4527 Select(N.getOperand(0)); // Select the chain.
4528 unsigned Align =
4529 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
4530 if (Align == 0) Align = 1;
4531
4532    // Turn the byte count into # iterations
4533 unsigned CountReg;
4534 unsigned Opcode;
4535 if (ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Node->getOperand(2))) {
4536 unsigned Val = ValC->getValue() & 255;
4537
4538 // If the value is a constant, then we can potentially use larger sets.
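      // For example (illustrative): a memset of 32 bytes to the value 7 with
      // a 4-byte aligned destination becomes ECX = 8, EAX = 0x07070707, and a
      // REP_STOSD.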
4539 switch (Align & 3) {
4540 case 2: // WORD aligned
4541 CountReg = MakeReg(MVT::i32);
4542 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4543 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
4544 } else {
4545 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4546 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
4547 }
4548 BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
4549 Opcode = X86::REP_STOSW;
4550 break;
4551 case 0: // DWORD aligned
4552 CountReg = MakeReg(MVT::i32);
4553 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4554 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
4555 } else {
4556 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4557 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
4558 }
4559 Val = (Val << 8) | Val;
4560 BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
4561 Opcode = X86::REP_STOSD;
4562 break;
4563 default: // BYTE aligned
4564 CountReg = SelectExpr(Node->getOperand(3));
4565 BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
4566 Opcode = X86::REP_STOSB;
4567 break;
4568 }
4569 } else {
4570 // If it's not a constant value we are storing, just fall back. We could
4571 // try to be clever to form 16 bit and 32 bit values, but we don't yet.
4572 unsigned ValReg = SelectExpr(Node->getOperand(2));
4573 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
4574 CountReg = SelectExpr(Node->getOperand(3));
4575 Opcode = X86::REP_STOSB;
4576 }
4577
4578    // No matter what the alignment is, we put the destination in EDI and the
4579    // count in ECX; the value to store is already in AL/AX/EAX.
4580 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
4581 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
4582 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
4583 BuildMI(BB, Opcode, 0);
4584 return;
4585 }
Chris Lattner966cdfb2005-05-09 21:17:38 +00004586 case ISD::MEMCPY: {
Chris Lattner31805bf2005-01-11 06:19:26 +00004587 Select(N.getOperand(0)); // Select the chain.
4588 unsigned Align =
4589 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
4590 if (Align == 0) Align = 1;
4591
4592    // Turn the byte count into # iterations
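    // For example, a DWORD-aligned copy divides the byte count by 4 (by a
    // constant fold or an SHR32ri by 2) and uses REP_MOVSD; an unaligned copy
    // falls back to REP_MOVSB with the raw byte count.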
4593 unsigned CountReg;
4594 unsigned Opcode;
4595 switch (Align & 3) {
4596 case 2: // WORD aligned
4597 CountReg = MakeReg(MVT::i32);
4598 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4599 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
4600 } else {
4601 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4602 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
4603 }
4604 Opcode = X86::REP_MOVSW;
4605 break;
4606 case 0: // DWORD aligned
4607 CountReg = MakeReg(MVT::i32);
4608 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4609 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
4610 } else {
4611 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4612 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
4613 }
4614 Opcode = X86::REP_MOVSD;
4615 break;
4616 default: // BYTE aligned
4617 CountReg = SelectExpr(Node->getOperand(3));
4618 Opcode = X86::REP_MOVSB;
4619 break;
4620 }
4621
4622 // No matter what the alignment is, we put the source in ESI, the
4623 // destination in EDI, and the count in ECX.
4624 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
4625 unsigned TmpReg2 = SelectExpr(Node->getOperand(2));
4626 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
4627 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
4628 BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
4629 BuildMI(BB, Opcode, 0);
4630 return;
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004631 }
Chris Lattner966cdfb2005-05-09 21:17:38 +00004632 case ISD::WRITEPORT:
4633 if (Node->getOperand(2).getValueType() != MVT::i16) {
4634 std::cerr << "llvm.writeport: Address size is not 16 bits\n";
4635 exit(1);
4636 }
4637 Select(Node->getOperand(0)); // Emit the chain.
4638
4639 Tmp1 = SelectExpr(Node->getOperand(1));
4640 switch (Node->getOperand(1).getValueType()) {
4641 case MVT::i8:
4642 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
4643 Tmp2 = X86::OUT8ir; Opc = X86::OUT8rr;
4644 break;
4645 case MVT::i16:
4646 BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(Tmp1);
4647 Tmp2 = X86::OUT16ir; Opc = X86::OUT16rr;
4648 break;
4649 case MVT::i32:
4650 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4651 Tmp2 = X86::OUT32ir; Opc = X86::OUT32rr;
4652 break;
4653 default:
4654      std::cerr << "llvm.writeport: invalid data type for X86 target\n";
4655 exit(1);
4656 }
4657
4658 // If the port is a single-byte constant, use the immediate form.
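    // (For example, a constant port like 0x71 fits in the imm8 of OUT8ir /
    // OUT16ir / OUT32ir; any other port value must be moved into DX and the
    // OUT*rr form used instead.)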
4659 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Node->getOperand(2)))
4660 if ((CN->getValue() & 255) == CN->getValue()) {
4661 BuildMI(BB, Tmp2, 1).addImm(CN->getValue());
4662 return;
4663 }
4664
4665 // Otherwise, move the I/O port address into the DX register.
4666 unsigned Reg = SelectExpr(Node->getOperand(2));
4667 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Reg);
4668 BuildMI(BB, Opc, 0);
4669 return;
4670 }
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004671 assert(0 && "Should not be reached!");
4672}
4673
4674
4675/// createX86PatternInstructionSelector - This pass converts an LLVM function
4676/// into a machine code representation using pattern matching and a machine
4677/// description file.
4678///
4679FunctionPass *llvm::createX86PatternInstructionSelector(TargetMachine &TM) {
Misha Brukman0e0a7a452005-04-21 23:38:14 +00004680 return new ISel(TM);
Chris Lattner8acb1ba2005-01-07 07:49:41 +00004681}