//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

// FIXME: temporary.
#include "llvm/Support/CommandLine.h"
static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden,
                                  cl::desc("Enable fastcc on X86"));

X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSE = Subtarget->hasSSE2();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  // Set up the TargetLowering object.

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (!Subtarget->isTargetDarwin())
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmpLongJmp(true);

  // Add legal addressing mode scale values.
  addLegalAddressScale(8);
  addLegalAddressScale(4);
  addLegalAddressScale(2);
  // Enter the ones which require both scale + index last. These are more
  // expensive.
  addLegalAddressScale(9);
  addLegalAddressScale(5);
  addLegalAddressScale(3);
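  // Editorial note (hedged): an x86 SIB byte only encodes scale factors of
  // 1, 2, 4, and 8, but a multiply by 3, 5, or 9 can still be folded into an
  // address by reusing the same register as both base and index, e.g.
  //   lea eax, [ecx + ecx*8]   ; eax = 9 * ecx
  // which is presumably why these values are listed last, as the more
  // expensive "scale + index" forms the comment above refers to.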

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  } else {
    if (X86ScalarSSE)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
    else
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
  // SSE has no i16 to fp conversion, only i32
  if (X86ScalarSSE)
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
  else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  }

  if (!Subtarget->is64Bit()) {
    // Custom lower SINT_TO_FP and FP_TO_SINT from/to i64 in 32-bit mode.
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  }

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSE) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    if (X86ScalarSSE && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  }

  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::SEXTLOAD, MVT::i1, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i8, Expand);
  setOperationAction(ISD::CTTZ, MVT::i8, Expand);
  setOperationAction(ISD::CTLZ, MVT::i8, Expand);
  setOperationAction(ISD::CTPOP, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ, MVT::i16, Expand);
  setOperationAction(ISD::CTLZ, MVT::i16, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ, MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i8, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET, MVT::Other, Custom);
  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  // X86 wants to expand memset / memcpy itself.
  setOperationAction(ISD::MEMSET, MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY, MVT::Other, Custom);

  // We don't have line number support yet.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin())
    setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  if (X86ScalarSSE) {
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FREM, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FREM, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(+0.0); // xorps / xorpd
  } else {
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFPRegisterClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }

    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    addLegalFPImmediate(+0.0); // FLD0
    addLegalFPImmediate(+1.0); // FLD1
    addLegalFPImmediate(-0.0); // FLD0/FCHS
    addLegalFPImmediate(-1.0); // FLD1/FCHS
  }

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::Vector + 1;
       VT != (unsigned)MVT::LAST_VALUETYPE; VT++) {
    setOperationAction(ISD::ADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8, X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetic
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Expand);
  }

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::AND, MVT::v4f32, Legal);
    setOperationAction(ISD::OR, MVT::v4f32, Legal);
    setOperationAction(ISD::XOR, MVT::v4f32, Legal);
    setOperationAction(ISD::ADD, MVT::v4f32, Legal);
    setOperationAction(ISD::SUB, MVT::v4f32, Legal);
    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD, MVT::v2f64, Legal);
    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2f64, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2f64, Legal);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    // Implement v4f32 insert_vector_elt in terms of SSE2 v8i16 ones.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
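    // Editorial note (hedged): as the promotion comment below indicates, the
    // loop above covers v16i8, v8i16, and v4i32 only; v2i64 and v2f64 receive
    // the same custom lowering via the explicit calls that follow.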
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are optimizing for size.
  maxStoresPerMemset = 16; // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16; // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
}

//===----------------------------------------------------------------------===//
//               C Calling Convention implementation
//===----------------------------------------------------------------------===//

/// AddLiveIn - This helper function adds the specified physical register to the
/// MachineFunction as a live in value. It also creates a corresponding virtual
/// register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
  MF.addLiveIn(PReg, VReg);
  return VReg;
}

/// HowToPassCCCArgument - Returns how a formal argument of the specified type
/// should be passed. If it is passed on the stack, returns the size of the
/// stack slot; if it is passed in an XMM register, returns the number of XMM
/// registers needed.
static void
HowToPassCCCArgument(MVT::ValueType ObjectVT, unsigned NumXMMRegs,
                     unsigned &ObjSize, unsigned &ObjXMMRegs) {
  ObjXMMRegs = 0;

  switch (ObjectVT) {
  default: assert(0 && "Unhandled argument type!");
  case MVT::i8:  ObjSize = 1; break;
  case MVT::i16: ObjSize = 2; break;
  case MVT::i32: ObjSize = 4; break;
  case MVT::i64: ObjSize = 8; break;
  case MVT::f32: ObjSize = 4; break;
  case MVT::f64: ObjSize = 8; break;
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v2f64:
    if (NumXMMRegs < 4)
      ObjXMMRegs = 1;
    else
      ObjSize = 16;
    break;
  }
}

SDOperand X86TargetLowering::LowerCCCArguments(SDOperand Op, SelectionDAG &DAG) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  std::vector<SDOperand> ArgValues;

  // Add DAG nodes to load the arguments... On entry to a function on the X86,
  // the stack frame looks like this:
  //
  // [ESP]     -- return address
  // [ESP + 4] -- first argument (leftmost lexically)
  // [ESP + 8] -- second argument, if first argument is <= 4 bytes in size
  //    ...
  //
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.
  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
  };
  for (unsigned i = 0; i < NumArgs; ++i) {
    MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
    unsigned ArgIncrement = 4;
    unsigned ObjSize = 0;
    unsigned ObjXMMRegs = 0;
    HowToPassCCCArgument(ObjectVT, NumXMMRegs, ObjSize, ObjXMMRegs);
    if (ObjSize > 4)
      ArgIncrement = ObjSize;

    SDOperand ArgValue;
    if (ObjXMMRegs) {
      // Passed in a XMM register.
      unsigned Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                               X86::VR128RegisterClass);
      ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
      ArgValues.push_back(ArgValue);
      NumXMMRegs += ObjXMMRegs;
    } else {
      // XMM arguments have to be aligned on 16-byte boundary.
      if (ObjSize == 16)
        ArgOffset = ((ArgOffset + 15) / 16) * 16;
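      // Editorial note: this is the usual round-up-to-a-multiple idiom,
      // ((N + 15) / 16) * 16; e.g. an ArgOffset of 20 becomes 32.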
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
      SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN,
                             DAG.getSrcValue(NULL));
      ArgValues.push_back(ArgValue);
      ArgOffset += ArgIncrement;   // Move on to the next argument...
    }
  }

  ArgValues.push_back(Root);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (isVarArg)
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
  RegSaveFrameIndex = 0xAAAAAAA;   // X86-64 only.
  ReturnAddrIndex = 0;             // No return address slot generated yet.
  BytesToPopOnReturn = 0;          // Callee pops nothing.
  BytesCallerReserves = ArgOffset;

  // If this is a struct return on Darwin/X86, the callee pops the hidden struct
  // pointer.
  if (MF.getFunction()->getCallingConv() == CallingConv::CSRet &&
      Subtarget->isTargetDarwin())
    BytesToPopOnReturn = 4;

  // Return the new list of results.
  std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(),
                                     Op.Val->value_end());
  return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size());
}


SDOperand X86TargetLowering::LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Chain = Op.getOperand(0);
  unsigned CallingConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);
  MVT::ValueType RetVT = Op.Val->getValueType(0);
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;

  // Keep track of the number of XMM regs passed so far.
  unsigned NumXMMRegs = 0;
  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
  };

  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDOperand Arg = Op.getOperand(5+2*i);

    switch (Arg.getValueType()) {
    default: assert(0 && "Unexpected ValueType for argument!");
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
    case MVT::f32:
      NumBytes += 4;
      break;
    case MVT::i64:
    case MVT::f64:
      NumBytes += 8;
      break;
    case MVT::v16i8:
    case MVT::v8i16:
    case MVT::v4i32:
    case MVT::v2i64:
    case MVT::v4f32:
    case MVT::v2f64:
      if (NumXMMRegs < 4)
        ++NumXMMRegs;
      else {
        // XMM arguments have to be aligned on 16-byte boundary.
        NumBytes = ((NumBytes + 15) / 16) * 16;
        NumBytes += 16;
      }
      break;
    }
  }

  Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));

  // Arguments go on the stack in reverse order, as specified by the ABI.
  unsigned ArgOffset = 0;
  NumXMMRegs = 0;
  std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
  std::vector<SDOperand> MemOpChains;
  SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
  for (unsigned i = 0; i != NumOps; ++i) {
    SDOperand Arg = Op.getOperand(5+2*i);

    switch (Arg.getValueType()) {
    default: assert(0 && "Unexpected ValueType for argument!");
    case MVT::i8:
    case MVT::i16: {
      // Promote the integer to 32 bits. If the input type is signed use a
      // sign extend, otherwise use a zero extend.
      unsigned ExtOp =
        dyn_cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue() ?
        ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, MVT::i32, Arg);
    }
    // Fallthrough

    case MVT::i32:
    case MVT::f32: {
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
      MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                        Arg, PtrOff, DAG.getSrcValue(NULL)));
      ArgOffset += 4;
      break;
    }
    case MVT::i64:
    case MVT::f64: {
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
      MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                        Arg, PtrOff, DAG.getSrcValue(NULL)));
      ArgOffset += 8;
      break;
    }
    case MVT::v16i8:
    case MVT::v8i16:
    case MVT::v4i32:
    case MVT::v2i64:
    case MVT::v4f32:
    case MVT::v2f64:
      if (NumXMMRegs < 4) {
        RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
        NumXMMRegs++;
      } else {
        // XMM arguments have to be aligned on 16-byte boundary.
        ArgOffset = ((ArgOffset + 15) / 16) * 16;
        SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
        PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
        MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                          Arg, PtrOff, DAG.getSrcValue(NULL)));
        ArgOffset += 16;
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  std::vector<MVT::ValueType> NodeTys;
  NodeTys.push_back(MVT::Other);   // Returns a chain
  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                      NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPush = 0;

  // If this is a call to a struct-return function on Darwin/X86, the callee
  // pops the hidden struct pointer, so we have to push it back.
  if (CallingConv == CallingConv::CSRet && Subtarget->isTargetDarwin())
    NumBytesForCalleeToPush = 4;

  NodeTys.clear();
  NodeTys.push_back(MVT::Other);   // Returns a chain
  if (RetVT != MVT::Other)
    NodeTys.push_back(MVT::Flag);  // Returns a flag for retval copy to use.
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  if (RetVT != MVT::Other)
    InFlag = Chain.getValue(1);

  std::vector<SDOperand> ResultVals;
  NodeTys.clear();
  switch (RetVT) {
  default: assert(0 && "Unknown value type to return!");
  case MVT::Other: break;
  case MVT::i8:
    Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1);
    ResultVals.push_back(Chain.getValue(0));
    NodeTys.push_back(MVT::i8);
    break;
  case MVT::i16:
    Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1);
    ResultVals.push_back(Chain.getValue(0));
    NodeTys.push_back(MVT::i16);
    break;
  case MVT::i32:
    if (Op.Val->getValueType(1) == MVT::i32) {
      Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
      ResultVals.push_back(Chain.getValue(0));
      Chain = DAG.getCopyFromReg(Chain, X86::EDX, MVT::i32,
                                 Chain.getValue(2)).getValue(1);
      ResultVals.push_back(Chain.getValue(0));
      NodeTys.push_back(MVT::i32);
    } else {
      Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
      ResultVals.push_back(Chain.getValue(0));
    }
    NodeTys.push_back(MVT::i32);
    break;
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v2f64:
    Chain = DAG.getCopyFromReg(Chain, X86::XMM0, RetVT, InFlag).getValue(1);
    ResultVals.push_back(Chain.getValue(0));
    NodeTys.push_back(RetVT);
    break;
  case MVT::f32:
  case MVT::f64: {
    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::f64);
    Tys.push_back(MVT::Other);
    Tys.push_back(MVT::Flag);
    std::vector<SDOperand> Ops;
    Ops.push_back(Chain);
    Ops.push_back(InFlag);
    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys,
                                   &Ops[0], Ops.size());
    Chain = RetVal.getValue(1);
    InFlag = RetVal.getValue(2);
    if (X86ScalarSSE) {
      // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
      // shouldn't be necessary except that RFP cannot be live across
      // multiple blocks. When stackifier is fixed, they can be uncoupled.
      MachineFunction &MF = DAG.getMachineFunction();
      int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
      SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
      Tys.clear();
      Tys.push_back(MVT::Other);
      Ops.clear();
      Ops.push_back(Chain);
      Ops.push_back(RetVal);
      Ops.push_back(StackSlot);
      Ops.push_back(DAG.getValueType(RetVT));
      Ops.push_back(InFlag);
      Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
      RetVal = DAG.getLoad(RetVT, Chain, StackSlot,
                           DAG.getSrcValue(NULL));
      Chain = RetVal.getValue(1);
    }

    if (RetVT == MVT::f32 && !X86ScalarSSE)
      // FIXME: we would really like to remember that this FP_ROUND
      // operation is okay to eliminate if we allow excess FP precision.
      RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
    ResultVals.push_back(RetVal);
    NodeTys.push_back(RetVT);
    break;
  }
  }

  // If the function returns void, just return the chain.
  if (ResultVals.empty())
    return Chain;

  // Otherwise, merge everything together with a MERGE_VALUES node.
  NodeTys.push_back(MVT::Other);
  ResultVals.push_back(Chain);
  SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
                              &ResultVals[0], ResultVals.size());
  return Res.getValue(Op.ResNo);
}


//===----------------------------------------------------------------------===//
//               X86-64 C Calling Convention implementation
//===----------------------------------------------------------------------===//

/// HowToPassX86_64CCCArgument - Returns how a formal argument of the specified
/// type should be passed. If it is passed on the stack, returns the size of
/// the stack slot; if it is passed in integer or XMM registers, returns the
/// number of integer or XMM registers needed.
static void
HowToPassX86_64CCCArgument(MVT::ValueType ObjectVT,
                           unsigned NumIntRegs, unsigned NumXMMRegs,
                           unsigned &ObjSize, unsigned &ObjIntRegs,
                           unsigned &ObjXMMRegs) {
  ObjSize = 0;
  ObjIntRegs = 0;
  ObjXMMRegs = 0;

  switch (ObjectVT) {
  default: assert(0 && "Unhandled argument type!");
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
    if (NumIntRegs < 6)
      ObjIntRegs = 1;
    else {
      switch (ObjectVT) {
      default: break;
      case MVT::i8:  ObjSize = 1; break;
      case MVT::i16: ObjSize = 2; break;
      case MVT::i32: ObjSize = 4; break;
      case MVT::i64: ObjSize = 8; break;
      }
    }
    break;
  case MVT::f32:
  case MVT::f64:
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v2f64:
    if (NumXMMRegs < 8)
      ObjXMMRegs = 1;
    else {
      switch (ObjectVT) {
      default: break;
      case MVT::f32:  ObjSize = 4; break;
      case MVT::f64:  ObjSize = 8; break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64: ObjSize = 16; break;
      }
      break;
    }
  }
}

SDOperand
X86TargetLowering::LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  std::vector<SDOperand> ArgValues;

  // Add DAG nodes to load the arguments... On entry to a function on the X86,
  // the stack frame looks like this:
  //
  // [RSP]     -- return address
  // [RSP + 8] -- first nonreg argument (leftmost lexically)
  // [RSP +16] -- second nonreg argument, if 1st argument is <= 8 bytes in size
  //    ...
  //
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
  unsigned NumIntRegs = 0;  // Int regs used for parameter passing.
  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.

  static const unsigned GPR8ArgRegs[] = {
    X86::DIL, X86::SIL, X86::DL, X86::CL, X86::R8B, X86::R9B
  };
  static const unsigned GPR16ArgRegs[] = {
    X86::DI, X86::SI, X86::DX, X86::CX, X86::R8W, X86::R9W
  };
  static const unsigned GPR32ArgRegs[] = {
    X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
  };
  static const unsigned GPR64ArgRegs[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };
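  // Editorial note: the integer argument order (RDI, RSI, RDX, RCX, R8, R9)
  // and the eight XMM argument registers follow the System V AMD64 calling
  // convention; the GPR8/16/32 tables are simply the narrower sub-registers
  // of the same six 64-bit registers.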

  for (unsigned i = 0; i < NumArgs; ++i) {
    MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
    unsigned ArgIncrement = 8;
    unsigned ObjSize = 0;
    unsigned ObjIntRegs = 0;
    unsigned ObjXMMRegs = 0;

    // FIXME: __int128 and long double support?
    HowToPassX86_64CCCArgument(ObjectVT, NumIntRegs, NumXMMRegs,
                               ObjSize, ObjIntRegs, ObjXMMRegs);
    if (ObjSize > 8)
      ArgIncrement = ObjSize;

    unsigned Reg = 0;
    SDOperand ArgValue;
    if (ObjIntRegs || ObjXMMRegs) {
      switch (ObjectVT) {
      default: assert(0 && "Unhandled argument type!");
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
      case MVT::i64: {
        TargetRegisterClass *RC = NULL;
        switch (ObjectVT) {
        default: break;
        case MVT::i8:
          RC = X86::GR8RegisterClass;
          Reg = GPR8ArgRegs[NumIntRegs];
          break;
        case MVT::i16:
          RC = X86::GR16RegisterClass;
          Reg = GPR16ArgRegs[NumIntRegs];
          break;
        case MVT::i32:
          RC = X86::GR32RegisterClass;
          Reg = GPR32ArgRegs[NumIntRegs];
          break;
        case MVT::i64:
          RC = X86::GR64RegisterClass;
          Reg = GPR64ArgRegs[NumIntRegs];
          break;
        }
        Reg = AddLiveIn(MF, Reg, RC);
        ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
        break;
      }
      case MVT::f32:
      case MVT::f64:
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64: {
        TargetRegisterClass *RC = (ObjectVT == MVT::f32) ?
          X86::FR32RegisterClass : ((ObjectVT == MVT::f64) ?
          X86::FR64RegisterClass : X86::VR128RegisterClass);
        Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], RC);
        ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
        break;
      }
      }
      NumIntRegs += ObjIntRegs;
      NumXMMRegs += ObjXMMRegs;
    } else if (ObjSize) {
      // XMM arguments have to be aligned on 16-byte boundary.
      if (ObjSize == 16)
        ArgOffset = ((ArgOffset + 15) / 16) * 16;
      // Create the SelectionDAG nodes corresponding to a load from this
      // parameter.
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
      SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN,
                             DAG.getSrcValue(NULL));
      ArgOffset += ArgIncrement;   // Move on to the next argument.
    }

    ArgValues.push_back(ArgValue);
  }

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    // For X86-64, if there are vararg parameters that are passed via
    // registers, then we must store them to their spots on the stack so they
    // may be loaded by dereferencing the result of va_next.
    VarArgsGPOffset = NumIntRegs * 8;
    VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
    RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);
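    // Editorial note: 6 * 8 + 8 * 16 = 176 bytes, i.e. slots for the six
    // integer argument registers (8 bytes each) followed by the eight XMM
    // argument registers (16 bytes each), matching the AMD64 ABI register
    // save area that va_arg indexes via gp_offset / fp_offset.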

    // Store the integer parameter registers.
    std::vector<SDOperand> MemOps;
    SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
    SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                                DAG.getConstant(VarArgsGPOffset, getPointerTy()));
    for (; NumIntRegs != 6; ++NumIntRegs) {
      unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
                                X86::GR64RegisterClass);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
      SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Val.getValue(1),
                                    Val, FIN, DAG.getSrcValue(NULL));
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                        DAG.getConstant(8, getPointerTy()));
    }

    // Now store the XMM (fp + vector) parameter registers.
    FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                      DAG.getConstant(VarArgsFPOffset, getPointerTy()));
    for (; NumXMMRegs != 8; ++NumXMMRegs) {
      unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                                X86::VR128RegisterClass);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
      SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Val.getValue(1),
                                    Val, FIN, DAG.getSrcValue(NULL));
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                        DAG.getConstant(16, getPointerTy()));
    }
    if (!MemOps.empty())
      Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                         &MemOps[0], MemOps.size());
  }

  ArgValues.push_back(Root);

  ReturnAddrIndex = 0;      // No return address slot generated yet.
  BytesToPopOnReturn = 0;   // Callee pops nothing.
  BytesCallerReserves = ArgOffset;

  // Return the new list of results.
  std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(),
                                     Op.Val->value_end());
  return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size());
}

SDOperand
X86TargetLowering::LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Chain = Op.getOperand(0);
  unsigned CallingConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);
  MVT::ValueType RetVT = Op.Val->getValueType(0);
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;

  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;
  unsigned NumIntRegs = 0;  // Int regs used for parameter passing.
  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.

  static const unsigned GPR8ArgRegs[] = {
    X86::DIL, X86::SIL, X86::DL, X86::CL, X86::R8B, X86::R9B
  };
  static const unsigned GPR16ArgRegs[] = {
    X86::DI, X86::SI, X86::DX, X86::CX, X86::R8W, X86::R9W
  };
  static const unsigned GPR32ArgRegs[] = {
    X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
  };
  static const unsigned GPR64ArgRegs[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };

  for (unsigned i = 0; i != NumOps; ++i) {
    SDOperand Arg = Op.getOperand(5+2*i);
    MVT::ValueType ArgVT = Arg.getValueType();

    switch (ArgVT) {
    default: assert(0 && "Unknown value type!");
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
    case MVT::i64:
      if (NumIntRegs < 6)
        ++NumIntRegs;
      else
        NumBytes += 8;
      break;
    case MVT::f32:
    case MVT::f64:
    case MVT::v16i8:
    case MVT::v8i16:
    case MVT::v4i32:
    case MVT::v2i64:
    case MVT::v4f32:
    case MVT::v2f64:
      if (NumXMMRegs < 8)
        NumXMMRegs++;
      else if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
        NumBytes += 8;
      else {
        // XMM arguments have to be aligned on 16-byte boundary.
        NumBytes = ((NumBytes + 15) / 16) * 16;
        NumBytes += 16;
      }
      break;
    }
  }

  Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));

  // Arguments go on the stack in reverse order, as specified by the ABI.
  unsigned ArgOffset = 0;
  NumIntRegs = 0;
  NumXMMRegs = 0;
  std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
  std::vector<SDOperand> MemOpChains;
  SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
  for (unsigned i = 0; i != NumOps; ++i) {
    SDOperand Arg = Op.getOperand(5+2*i);
    MVT::ValueType ArgVT = Arg.getValueType();

    switch (ArgVT) {
    default: assert(0 && "Unexpected ValueType for argument!");
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
    case MVT::i64:
      if (NumIntRegs < 6) {
        unsigned Reg = 0;
        switch (ArgVT) {
        default: break;
        case MVT::i8:  Reg = GPR8ArgRegs[NumIntRegs];  break;
        case MVT::i16: Reg = GPR16ArgRegs[NumIntRegs]; break;
        case MVT::i32: Reg = GPR32ArgRegs[NumIntRegs]; break;
        case MVT::i64: Reg = GPR64ArgRegs[NumIntRegs]; break;
        }
        RegsToPass.push_back(std::make_pair(Reg, Arg));
        ++NumIntRegs;
      } else {
        SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
        PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
        MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                          Arg, PtrOff, DAG.getSrcValue(NULL)));
        ArgOffset += 8;
      }
      break;
    case MVT::f32:
    case MVT::f64:
    case MVT::v16i8:
    case MVT::v8i16:
    case MVT::v4i32:
    case MVT::v2i64:
    case MVT::v4f32:
    case MVT::v2f64:
      if (NumXMMRegs < 8) {
        RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
        NumXMMRegs++;
      } else {
        if (ArgVT != MVT::f32 && ArgVT != MVT::f64) {
          // XMM arguments have to be aligned on 16-byte boundary.
          ArgOffset = ((ArgOffset + 15) / 16) * 16;
        }
        SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
        PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
        MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                          Arg, PtrOff, DAG.getSrcValue(NULL)));
        if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
          ArgOffset += 8;
        else
          ArgOffset += 16;
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isVarArg) {
    // From AMD64 ABI document:
1176 // For calls that may call functions that use varargs or stdargs
1177 // (prototype-less calls or calls to functions containing ellipsis (...) in
1178 // the declaration) %al is used as hidden argument to specify the number
 1179    // the declaration) %al is used as a hidden argument to specify the number
 1180    // of SSE registers used. The contents of %al do not need to match exactly
 1181    // the number of registers, but must be an upper bound on the number of SSE
1182 Chain = DAG.getCopyToReg(Chain, X86::AL,
1183 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
1184 InFlag = Chain.getValue(1);
1185 }
1186
1187 // If the callee is a GlobalAddress node (quite common, every direct call is)
1188 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1189 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1190 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
1191 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
1192 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
1193
1194 std::vector<MVT::ValueType> NodeTys;
1195 NodeTys.push_back(MVT::Other); // Returns a chain
1196 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
1197 std::vector<SDOperand> Ops;
1198 Ops.push_back(Chain);
1199 Ops.push_back(Callee);
1200
1201 // Add argument registers to the end of the list so that they are known live
1202 // into the call.
1203 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1204 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1205 RegsToPass[i].second.getValueType()));
1206
1207 if (InFlag.Val)
1208 Ops.push_back(InFlag);
1209
1210 // FIXME: Do not generate X86ISD::TAILCALL for now.
1211 Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
1212 NodeTys, &Ops[0], Ops.size());
1213 InFlag = Chain.getValue(1);
1214
1215 NodeTys.clear();
1216 NodeTys.push_back(MVT::Other); // Returns a chain
1217 if (RetVT != MVT::Other)
1218 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
1219 Ops.clear();
1220 Ops.push_back(Chain);
1221 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
1222 Ops.push_back(DAG.getConstant(0, getPointerTy()));
1223 Ops.push_back(InFlag);
1224 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
1225 if (RetVT != MVT::Other)
1226 InFlag = Chain.getValue(1);
1227
1228 std::vector<SDOperand> ResultVals;
1229 NodeTys.clear();
1230 switch (RetVT) {
1231 default: assert(0 && "Unknown value type to return!");
1232 case MVT::Other: break;
1233 case MVT::i8:
1234 Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1);
1235 ResultVals.push_back(Chain.getValue(0));
1236 NodeTys.push_back(MVT::i8);
1237 break;
1238 case MVT::i16:
1239 Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1);
1240 ResultVals.push_back(Chain.getValue(0));
1241 NodeTys.push_back(MVT::i16);
1242 break;
1243 case MVT::i32:
1244 Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
1245 ResultVals.push_back(Chain.getValue(0));
1246 NodeTys.push_back(MVT::i32);
1247 break;
1248 case MVT::i64:
1249 if (Op.Val->getValueType(1) == MVT::i64) {
1250 // FIXME: __int128 support?
1251 Chain = DAG.getCopyFromReg(Chain, X86::RAX, MVT::i64, InFlag).getValue(1);
1252 ResultVals.push_back(Chain.getValue(0));
1253 Chain = DAG.getCopyFromReg(Chain, X86::RDX, MVT::i64,
1254 Chain.getValue(2)).getValue(1);
1255 ResultVals.push_back(Chain.getValue(0));
1256 NodeTys.push_back(MVT::i64);
1257 } else {
1258 Chain = DAG.getCopyFromReg(Chain, X86::RAX, MVT::i64, InFlag).getValue(1);
1259 ResultVals.push_back(Chain.getValue(0));
1260 }
1261 NodeTys.push_back(MVT::i64);
1262 break;
1263 case MVT::f32:
1264 case MVT::f64:
1265 case MVT::v16i8:
1266 case MVT::v8i16:
1267 case MVT::v4i32:
1268 case MVT::v2i64:
1269 case MVT::v4f32:
1270 case MVT::v2f64:
1271 // FIXME: long double support?
1272 Chain = DAG.getCopyFromReg(Chain, X86::XMM0, RetVT, InFlag).getValue(1);
1273 ResultVals.push_back(Chain.getValue(0));
1274 NodeTys.push_back(RetVT);
1275 break;
1276 }
1277
1278 // If the function returns void, just return the chain.
1279 if (ResultVals.empty())
1280 return Chain;
1281
1282 // Otherwise, merge everything together with a MERGE_VALUES node.
1283 NodeTys.push_back(MVT::Other);
1284 ResultVals.push_back(Chain);
1285 SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
1286 &ResultVals[0], ResultVals.size());
1287 return Res.getValue(Op.ResNo);
1288}
1289
Chris Lattner76ac0682005-11-15 00:40:23 +00001290//===----------------------------------------------------------------------===//
1291// Fast Calling Convention implementation
1292//===----------------------------------------------------------------------===//
1293//
1294// The X86 'fast' calling convention passes up to two integer arguments in
1295// registers (an appropriate portion of EAX/EDX), passes arguments in C order,
1296// and requires that the callee pop its arguments off the stack (allowing proper
1297// tail calls), and has the same return value conventions as C calling convs.
1298//
1299// This calling convention always arranges for the callee pop value to be 8n+4
1300// bytes, which is needed for tail recursion elimination and stack alignment
1301// reasons.
1302//
1303// Note that this can be enhanced in the future to pass fp vals in registers
1304// (when we have a global fp allocator) and do other tricks.
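//
// For illustration (assuming FASTCC_NUM_INT_ARGS_INREGS is 2): a fastcc call
// such as f(int a, int b, int c) would pass a in EAX, b in EDX, and c on the
// stack, and the callee would pop that stack space on return.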
1305//
1306
Evan Cheng89001ad2006-04-27 08:31:10 +00001307/// HowToPassFastCCArgument - Returns how a formal argument of the specified
 1308/// type should be passed. If it is passed on the stack, returns the size of
Evan Cheng763f9b02006-05-26 18:25:43 +00001309/// the stack slot; if it is passed in integer or XMM registers, returns the
Evan Cheng89001ad2006-04-27 08:31:10 +00001310/// number of integer or XMM registers needed.
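/// E.g. (assuming FASTCC_NUM_INT_ARGS_INREGS is 2) an i64 argument seen after
/// one i32 argument gets ObjIntRegs = 1 and ObjSize = 4: its low half travels
/// in a register and its high half in a 4-byte stack slot.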
Evan Cheng48940d12006-04-27 01:32:22 +00001311static void
Evan Cheng89001ad2006-04-27 08:31:10 +00001312HowToPassFastCCArgument(MVT::ValueType ObjectVT,
1313 unsigned NumIntRegs, unsigned NumXMMRegs,
1314 unsigned &ObjSize, unsigned &ObjIntRegs,
1315 unsigned &ObjXMMRegs) {
Evan Cheng48940d12006-04-27 01:32:22 +00001316 ObjSize = 0;
Evan Cheng2b2c1be2006-06-01 05:53:27 +00001317 ObjIntRegs = 0;
1318 ObjXMMRegs = 0;
Evan Cheng48940d12006-04-27 01:32:22 +00001319
1320 switch (ObjectVT) {
1321 default: assert(0 && "Unhandled argument type!");
Evan Cheng48940d12006-04-27 01:32:22 +00001322 case MVT::i8:
Evan Cheng38c5aee2006-06-24 08:36:10 +00001323#if FASTCC_NUM_INT_ARGS_INREGS > 0
Evan Cheng48940d12006-04-27 01:32:22 +00001324 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
Evan Cheng24eb3f42006-04-27 05:35:28 +00001325 ObjIntRegs = 1;
Evan Cheng48940d12006-04-27 01:32:22 +00001326 else
Evan Cheng38c5aee2006-06-24 08:36:10 +00001327#endif
Evan Cheng48940d12006-04-27 01:32:22 +00001328 ObjSize = 1;
1329 break;
1330 case MVT::i16:
Evan Cheng38c5aee2006-06-24 08:36:10 +00001331#if FASTCC_NUM_INT_ARGS_INREGS > 0
Evan Cheng48940d12006-04-27 01:32:22 +00001332 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
Evan Cheng24eb3f42006-04-27 05:35:28 +00001333 ObjIntRegs = 1;
Evan Cheng48940d12006-04-27 01:32:22 +00001334 else
Evan Cheng38c5aee2006-06-24 08:36:10 +00001335#endif
Evan Cheng48940d12006-04-27 01:32:22 +00001336 ObjSize = 2;
1337 break;
1338 case MVT::i32:
Evan Cheng38c5aee2006-06-24 08:36:10 +00001339#if FASTCC_NUM_INT_ARGS_INREGS > 0
Evan Cheng48940d12006-04-27 01:32:22 +00001340 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
Evan Cheng24eb3f42006-04-27 05:35:28 +00001341 ObjIntRegs = 1;
Evan Cheng48940d12006-04-27 01:32:22 +00001342 else
Evan Cheng38c5aee2006-06-24 08:36:10 +00001343#endif
Evan Cheng48940d12006-04-27 01:32:22 +00001344 ObjSize = 4;
1345 break;
1346 case MVT::i64:
Evan Cheng38c5aee2006-06-24 08:36:10 +00001347#if FASTCC_NUM_INT_ARGS_INREGS > 0
Evan Cheng48940d12006-04-27 01:32:22 +00001348 if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) {
Evan Cheng24eb3f42006-04-27 05:35:28 +00001349 ObjIntRegs = 2;
Evan Cheng48940d12006-04-27 01:32:22 +00001350 } else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) {
Evan Cheng24eb3f42006-04-27 05:35:28 +00001351 ObjIntRegs = 1;
Evan Cheng48940d12006-04-27 01:32:22 +00001352 ObjSize = 4;
1353 } else
Evan Cheng38c5aee2006-06-24 08:36:10 +00001354#endif
Evan Cheng48940d12006-04-27 01:32:22 +00001355      ObjSize = 8;
    break;
1356 case MVT::f32:
1357 ObjSize = 4;
1358 break;
1359 case MVT::f64:
1360 ObjSize = 8;
1361 break;
Evan Cheng89001ad2006-04-27 08:31:10 +00001362 case MVT::v16i8:
1363 case MVT::v8i16:
1364 case MVT::v4i32:
1365 case MVT::v2i64:
1366 case MVT::v4f32:
1367 case MVT::v2f64:
Evan Chengbfb5ea62006-05-26 19:22:06 +00001368 if (NumXMMRegs < 4)
Evan Cheng89001ad2006-04-27 08:31:10 +00001369 ObjXMMRegs = 1;
1370 else
1371 ObjSize = 16;
1372 break;
Evan Cheng48940d12006-04-27 01:32:22 +00001373 }
1374}
1375
Evan Cheng17e734f2006-05-23 21:06:34 +00001376SDOperand
1377X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG) {
1378 unsigned NumArgs = Op.Val->getNumValues()-1;
Chris Lattner76ac0682005-11-15 00:40:23 +00001379 MachineFunction &MF = DAG.getMachineFunction();
1380 MachineFrameInfo *MFI = MF.getFrameInfo();
Evan Cheng17e734f2006-05-23 21:06:34 +00001381 SDOperand Root = Op.getOperand(0);
1382 std::vector<SDOperand> ArgValues;
Chris Lattner76ac0682005-11-15 00:40:23 +00001383
Evan Cheng48940d12006-04-27 01:32:22 +00001384 // Add DAG nodes to load the arguments... On entry to a function the stack
1385 // frame looks like this:
1386 //
1387 // [ESP] -- return address
1388 // [ESP + 4] -- first nonreg argument (leftmost lexically)
Evan Chengcbfb3d02006-05-26 18:37:16 +00001389 // [ESP + 8] -- second nonreg argument, if 1st argument is <= 4 bytes in size
Evan Cheng48940d12006-04-27 01:32:22 +00001390 // ...
Chris Lattner76ac0682005-11-15 00:40:23 +00001391 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
1392
1393 // Keep track of the number of integer regs passed so far. This can be either
1394 // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
1395 // used).
1396 unsigned NumIntRegs = 0;
Evan Cheng89001ad2006-04-27 08:31:10 +00001397 unsigned NumXMMRegs = 0; // XMM regs used for parameter passing.
Evan Cheng2a330942006-05-25 00:59:30 +00001398
1399 static const unsigned XMMArgRegs[] = {
Evan Chengbfb5ea62006-05-26 19:22:06 +00001400 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
Evan Cheng2a330942006-05-25 00:59:30 +00001401 };
Chris Lattner43798852006-03-17 05:10:20 +00001402
Evan Chenge0bcfbe2006-04-26 01:20:17 +00001403 for (unsigned i = 0; i < NumArgs; ++i) {
Evan Cheng17e734f2006-05-23 21:06:34 +00001404 MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
1405 unsigned ArgIncrement = 4;
1406 unsigned ObjSize = 0;
1407 unsigned ObjIntRegs = 0;
1408 unsigned ObjXMMRegs = 0;
Chris Lattner76ac0682005-11-15 00:40:23 +00001409
Evan Cheng17e734f2006-05-23 21:06:34 +00001410 HowToPassFastCCArgument(ObjectVT, NumIntRegs, NumXMMRegs,
1411 ObjSize, ObjIntRegs, ObjXMMRegs);
Evan Chenga01e7992006-05-26 18:39:59 +00001412 if (ObjSize > 4)
Evan Cheng17e734f2006-05-23 21:06:34 +00001413 ArgIncrement = ObjSize;
Evan Cheng48940d12006-04-27 01:32:22 +00001414
Evan Cheng2489ccd2006-06-01 00:30:39 +00001415 unsigned Reg = 0;
Evan Cheng17e734f2006-05-23 21:06:34 +00001416 SDOperand ArgValue;
1417 if (ObjIntRegs || ObjXMMRegs) {
1418 switch (ObjectVT) {
1419 default: assert(0 && "Unhandled argument type!");
Evan Cheng17e734f2006-05-23 21:06:34 +00001420 case MVT::i8:
1421 Reg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
1422 X86::GR8RegisterClass);
1423 ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i8);
1424 break;
1425 case MVT::i16:
1426 Reg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
1427 X86::GR16RegisterClass);
1428 ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i16);
1429 break;
1430 case MVT::i32:
1431 Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
1432 X86::GR32RegisterClass);
1433 ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32);
1434 break;
1435 case MVT::i64:
1436 Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
1437 X86::GR32RegisterClass);
1438 ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32);
1439 if (ObjIntRegs == 2) {
1440 Reg = AddLiveIn(MF, X86::EDX, X86::GR32RegisterClass);
1441 SDOperand ArgValue2 = DAG.getCopyFromReg(Root, Reg, MVT::i32);
1442 ArgValue= DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2);
Evan Cheng24eb3f42006-04-27 05:35:28 +00001443 }
Evan Cheng17e734f2006-05-23 21:06:34 +00001444 break;
1445 case MVT::v16i8:
1446 case MVT::v8i16:
1447 case MVT::v4i32:
1448 case MVT::v2i64:
1449 case MVT::v4f32:
1450 case MVT::v2f64:
1451 Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], X86::VR128RegisterClass);
1452 ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
1453 break;
Evan Cheng48940d12006-04-27 01:32:22 +00001454 }
Evan Cheng17e734f2006-05-23 21:06:34 +00001455 NumIntRegs += ObjIntRegs;
1456 NumXMMRegs += ObjXMMRegs;
Chris Lattner76ac0682005-11-15 00:40:23 +00001457 }
Evan Cheng17e734f2006-05-23 21:06:34 +00001458
1459 if (ObjSize) {
Evan Chengb92f4182006-05-26 20:37:47 +00001460 // XMM arguments have to be aligned on 16-byte boundary.
1461 if (ObjSize == 16)
1462 ArgOffset = ((ArgOffset + 15) / 16) * 16;
Evan Cheng17e734f2006-05-23 21:06:34 +00001463 // Create the SelectionDAG nodes corresponding to a load from this
1464 // parameter.
1465 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
1466 SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
1467 if (ObjectVT == MVT::i64 && ObjIntRegs) {
 1468        SDOperand ArgValue2 = DAG.getLoad(MVT::i32, Root, FIN,
1469 DAG.getSrcValue(NULL));
1470 ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2);
1471 } else
1472 ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN,
1473 DAG.getSrcValue(NULL));
1474 ArgOffset += ArgIncrement; // Move on to the next argument.
1475 }
1476
1477 ArgValues.push_back(ArgValue);
Chris Lattner76ac0682005-11-15 00:40:23 +00001478 }
1479
Evan Cheng17e734f2006-05-23 21:06:34 +00001480 ArgValues.push_back(Root);
1481
Chris Lattner76ac0682005-11-15 00:40:23 +00001482  // Make sure the argument area takes 8n+4 bytes so that the start of the
 1483  // arguments, once the retaddr has been pushed, stays properly aligned.
1484 if ((ArgOffset & 7) == 0)
1485 ArgOffset += 4;
1486
1487 VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
Evan Cheng11b0a5d2006-09-08 06:48:29 +00001488 RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only.
Chris Lattner76ac0682005-11-15 00:40:23 +00001489 ReturnAddrIndex = 0; // No return address slot generated yet.
1490 BytesToPopOnReturn = ArgOffset; // Callee pops all stack arguments.
1491 BytesCallerReserves = 0;
1492
1493 // Finally, inform the code generator which regs we return values in.
Evan Cheng17e734f2006-05-23 21:06:34 +00001494 switch (getValueType(MF.getFunction()->getReturnType())) {
Chris Lattner76ac0682005-11-15 00:40:23 +00001495 default: assert(0 && "Unknown type!");
1496 case MVT::isVoid: break;
Chris Lattner76ac0682005-11-15 00:40:23 +00001497 case MVT::i8:
1498 case MVT::i16:
1499 case MVT::i32:
1500 MF.addLiveOut(X86::EAX);
1501 break;
1502 case MVT::i64:
1503 MF.addLiveOut(X86::EAX);
1504 MF.addLiveOut(X86::EDX);
1505 break;
1506 case MVT::f32:
1507 case MVT::f64:
1508 MF.addLiveOut(X86::ST0);
1509 break;
Evan Cheng5ee96892006-05-25 18:56:34 +00001510 case MVT::v16i8:
1511 case MVT::v8i16:
1512 case MVT::v4i32:
1513 case MVT::v2i64:
1514 case MVT::v4f32:
1515 case MVT::v2f64:
Evan Cheng88decde2006-04-28 21:29:37 +00001516 MF.addLiveOut(X86::XMM0);
1517 break;
1518 }
Evan Cheng88decde2006-04-28 21:29:37 +00001519
Evan Cheng17e734f2006-05-23 21:06:34 +00001520 // Return the new list of results.
1521 std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(),
1522 Op.Val->value_end());
Chris Lattnerc24a1d32006-08-08 02:23:42 +00001523 return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size());
Chris Lattner76ac0682005-11-15 00:40:23 +00001524}
1525
Chris Lattnerc24a1d32006-08-08 02:23:42 +00001526SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG){
Evan Cheng2a330942006-05-25 00:59:30 +00001527 SDOperand Chain = Op.getOperand(0);
1528 unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue();
1529 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
1530 bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
1531 SDOperand Callee = Op.getOperand(4);
1532 MVT::ValueType RetVT= Op.Val->getValueType(0);
1533 unsigned NumOps = (Op.getNumOperands() - 5) / 2;
1534
Chris Lattner76ac0682005-11-15 00:40:23 +00001535 // Count how many bytes are to be pushed on the stack.
1536 unsigned NumBytes = 0;
1537
1538 // Keep track of the number of integer regs passed so far. This can be either
1539 // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
1540 // used).
1541 unsigned NumIntRegs = 0;
Evan Cheng2a330942006-05-25 00:59:30 +00001542 unsigned NumXMMRegs = 0; // XMM regs used for parameter passing.
Chris Lattner76ac0682005-11-15 00:40:23 +00001543
Evan Cheng2a330942006-05-25 00:59:30 +00001544 static const unsigned GPRArgRegs[][2] = {
1545 { X86::AL, X86::DL },
1546 { X86::AX, X86::DX },
1547 { X86::EAX, X86::EDX }
1548 };
1549 static const unsigned XMMArgRegs[] = {
Evan Chengbfb5ea62006-05-26 19:22:06 +00001550 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
Evan Cheng2a330942006-05-25 00:59:30 +00001551 };
1552
1553 for (unsigned i = 0; i != NumOps; ++i) {
1554 SDOperand Arg = Op.getOperand(5+2*i);
1555
1556 switch (Arg.getValueType()) {
Chris Lattner76ac0682005-11-15 00:40:23 +00001557 default: assert(0 && "Unknown value type!");
Chris Lattner76ac0682005-11-15 00:40:23 +00001558 case MVT::i8:
1559 case MVT::i16:
1560 case MVT::i32:
Evan Cheng38c5aee2006-06-24 08:36:10 +00001561#if FASTCC_NUM_INT_ARGS_INREGS > 0
Chris Lattner43798852006-03-17 05:10:20 +00001562 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
Chris Lattner76ac0682005-11-15 00:40:23 +00001563 ++NumIntRegs;
1564 break;
1565 }
Evan Cheng38c5aee2006-06-24 08:36:10 +00001566#endif
Evan Cheng0421aca2006-05-25 22:38:31 +00001567 // Fall through
Chris Lattner76ac0682005-11-15 00:40:23 +00001568 case MVT::f32:
1569 NumBytes += 4;
1570 break;
Chris Lattner76ac0682005-11-15 00:40:23 +00001571 case MVT::f64:
1572 NumBytes += 8;
1573 break;
Evan Cheng2a330942006-05-25 00:59:30 +00001574 case MVT::v16i8:
1575 case MVT::v8i16:
1576 case MVT::v4i32:
1577 case MVT::v2i64:
1578 case MVT::v4f32:
Evan Cheng5ee96892006-05-25 18:56:34 +00001579 case MVT::v2f64:
Evan Chengbfb5ea62006-05-26 19:22:06 +00001580 if (NumXMMRegs < 4)
Evan Cheng2a330942006-05-25 00:59:30 +00001581 NumXMMRegs++;
Evan Chengb92f4182006-05-26 20:37:47 +00001582 else {
1583 // XMM arguments have to be aligned on 16-byte boundary.
1584 NumBytes = ((NumBytes + 15) / 16) * 16;
Evan Cheng2a330942006-05-25 00:59:30 +00001585 NumBytes += 16;
Evan Chengb92f4182006-05-26 20:37:47 +00001586 }
Evan Cheng2a330942006-05-25 00:59:30 +00001587 break;
Chris Lattner76ac0682005-11-15 00:40:23 +00001588 }
Evan Cheng2a330942006-05-25 00:59:30 +00001589 }
Chris Lattner76ac0682005-11-15 00:40:23 +00001590
 1591  // Make sure the argument area takes 8n+4 bytes so that the start of the
 1592  // arguments, once the retaddr has been pushed, stays properly aligned.
1593 if ((NumBytes & 7) == 0)
1594 NumBytes += 4;
1595
Chris Lattner62c34842006-02-13 09:00:43 +00001596 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
Chris Lattner76ac0682005-11-15 00:40:23 +00001597
1598 // Arguments go on the stack in reverse order, as specified by the ABI.
1599 unsigned ArgOffset = 0;
Chris Lattner76ac0682005-11-15 00:40:23 +00001600 NumIntRegs = 0;
Evan Cheng2a330942006-05-25 00:59:30 +00001601 std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
1602 std::vector<SDOperand> MemOpChains;
Evan Cheng11b0a5d2006-09-08 06:48:29 +00001603 SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
Evan Cheng2a330942006-05-25 00:59:30 +00001604 for (unsigned i = 0; i != NumOps; ++i) {
1605 SDOperand Arg = Op.getOperand(5+2*i);
1606
1607 switch (Arg.getValueType()) {
Chris Lattner76ac0682005-11-15 00:40:23 +00001608 default: assert(0 && "Unexpected ValueType for argument!");
Chris Lattner76ac0682005-11-15 00:40:23 +00001609 case MVT::i8:
1610 case MVT::i16:
1611 case MVT::i32:
Evan Cheng38c5aee2006-06-24 08:36:10 +00001612#if FASTCC_NUM_INT_ARGS_INREGS > 0
Chris Lattner43798852006-03-17 05:10:20 +00001613 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
Evan Cheng2a330942006-05-25 00:59:30 +00001614 RegsToPass.push_back(
1615 std::make_pair(GPRArgRegs[Arg.getValueType()-MVT::i8][NumIntRegs],
1616 Arg));
Chris Lattner76ac0682005-11-15 00:40:23 +00001617 ++NumIntRegs;
1618 break;
1619 }
Evan Cheng38c5aee2006-06-24 08:36:10 +00001620#endif
Chris Lattner76ac0682005-11-15 00:40:23 +00001621 // Fall through
1622 case MVT::f32: {
1623 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
Evan Cheng2a330942006-05-25 00:59:30 +00001624 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
1625 MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
1626 Arg, PtrOff, DAG.getSrcValue(NULL)));
Chris Lattner76ac0682005-11-15 00:40:23 +00001627 ArgOffset += 4;
1628 break;
1629 }
Evan Cheng2a330942006-05-25 00:59:30 +00001630 case MVT::f64: {
Chris Lattner76ac0682005-11-15 00:40:23 +00001631 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
Evan Cheng2a330942006-05-25 00:59:30 +00001632 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
1633 MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
1634 Arg, PtrOff, DAG.getSrcValue(NULL)));
Chris Lattner76ac0682005-11-15 00:40:23 +00001635 ArgOffset += 8;
1636 break;
1637 }
Evan Cheng2a330942006-05-25 00:59:30 +00001638 case MVT::v16i8:
1639 case MVT::v8i16:
1640 case MVT::v4i32:
1641 case MVT::v2i64:
1642 case MVT::v4f32:
Evan Cheng5ee96892006-05-25 18:56:34 +00001643 case MVT::v2f64:
Evan Chengbfb5ea62006-05-26 19:22:06 +00001644 if (NumXMMRegs < 4) {
Evan Cheng2a330942006-05-25 00:59:30 +00001645 RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
1646 NumXMMRegs++;
1647 } else {
Evan Chengb92f4182006-05-26 20:37:47 +00001648 // XMM arguments have to be aligned on 16-byte boundary.
1649 ArgOffset = ((ArgOffset + 15) / 16) * 16;
Evan Cheng2a330942006-05-25 00:59:30 +00001650 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
1651 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
1652 MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
1653 Arg, PtrOff, DAG.getSrcValue(NULL)));
1654 ArgOffset += 16;
1655 }
1656 }
Chris Lattner76ac0682005-11-15 00:40:23 +00001657 }
Chris Lattner76ac0682005-11-15 00:40:23 +00001658
Evan Cheng2a330942006-05-25 00:59:30 +00001659 if (!MemOpChains.empty())
Chris Lattnerc24a1d32006-08-08 02:23:42 +00001660 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
1661 &MemOpChains[0], MemOpChains.size());
Chris Lattner76ac0682005-11-15 00:40:23 +00001662
Nate Begeman7e5496d2006-02-17 00:03:04 +00001663 // Build a sequence of copy-to-reg nodes chained together with token chain
1664 // and flag operands which copy the outgoing args into registers.
1665 SDOperand InFlag;
Evan Cheng2a330942006-05-25 00:59:30 +00001666 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1667 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
1668 InFlag);
Nate Begeman7e5496d2006-02-17 00:03:04 +00001669 InFlag = Chain.getValue(1);
1670 }
1671
Evan Cheng2a330942006-05-25 00:59:30 +00001672 // If the callee is a GlobalAddress node (quite common, every direct call is)
1673 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1674 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1675 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
1676 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
1677 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
1678
Nate Begeman7e5496d2006-02-17 00:03:04 +00001679 std::vector<MVT::ValueType> NodeTys;
1680 NodeTys.push_back(MVT::Other); // Returns a chain
1681 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
1682 std::vector<SDOperand> Ops;
1683 Ops.push_back(Chain);
1684 Ops.push_back(Callee);
Evan Chengca254862006-06-14 18:17:40 +00001685
1686 // Add argument registers to the end of the list so that they are known live
1687 // into the call.
1688 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1689 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1690 RegsToPass[i].second.getValueType()));
1691
Nate Begeman7e5496d2006-02-17 00:03:04 +00001692 if (InFlag.Val)
1693 Ops.push_back(InFlag);
1694
1695 // FIXME: Do not generate X86ISD::TAILCALL for now.
Chris Lattner3d826992006-05-16 06:45:34 +00001696 Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
Chris Lattnerc24a1d32006-08-08 02:23:42 +00001697 NodeTys, &Ops[0], Ops.size());
Nate Begeman7e5496d2006-02-17 00:03:04 +00001698 InFlag = Chain.getValue(1);
1699
1700 NodeTys.clear();
1701 NodeTys.push_back(MVT::Other); // Returns a chain
Evan Cheng2a330942006-05-25 00:59:30 +00001702 if (RetVT != MVT::Other)
1703 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
Nate Begeman7e5496d2006-02-17 00:03:04 +00001704 Ops.clear();
1705 Ops.push_back(Chain);
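  // NumBytes is passed twice below: the second CALLSEQ_END constant is the
  // number of bytes the callee pops, and fastcc has the callee clear its own
  // argument area (the C-convention lowering passes 0 there instead).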
Evan Cheng2a330942006-05-25 00:59:30 +00001706 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
1707 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
Nate Begeman7e5496d2006-02-17 00:03:04 +00001708 Ops.push_back(InFlag);
Chris Lattnerc24a1d32006-08-08 02:23:42 +00001709 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
Evan Cheng2a330942006-05-25 00:59:30 +00001710 if (RetVT != MVT::Other)
1711 InFlag = Chain.getValue(1);
Nate Begeman7e5496d2006-02-17 00:03:04 +00001712
Evan Cheng2a330942006-05-25 00:59:30 +00001713 std::vector<SDOperand> ResultVals;
1714 NodeTys.clear();
1715 switch (RetVT) {
1716 default: assert(0 && "Unknown value type to return!");
1717 case MVT::Other: break;
1718 case MVT::i8:
1719 Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1);
1720 ResultVals.push_back(Chain.getValue(0));
1721 NodeTys.push_back(MVT::i8);
1722 break;
1723 case MVT::i16:
1724 Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1);
1725 ResultVals.push_back(Chain.getValue(0));
1726 NodeTys.push_back(MVT::i16);
1727 break;
1728 case MVT::i32:
1729 if (Op.Val->getValueType(1) == MVT::i32) {
1730 Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
1731 ResultVals.push_back(Chain.getValue(0));
1732 Chain = DAG.getCopyFromReg(Chain, X86::EDX, MVT::i32,
1733 Chain.getValue(2)).getValue(1);
1734 ResultVals.push_back(Chain.getValue(0));
1735 NodeTys.push_back(MVT::i32);
1736 } else {
1737 Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
1738 ResultVals.push_back(Chain.getValue(0));
Evan Cheng172fce72006-01-06 00:43:03 +00001739 }
Evan Cheng2a330942006-05-25 00:59:30 +00001740 NodeTys.push_back(MVT::i32);
1741 break;
1742 case MVT::v16i8:
1743 case MVT::v8i16:
1744 case MVT::v4i32:
1745 case MVT::v2i64:
1746 case MVT::v4f32:
1747 case MVT::v2f64:
Evan Cheng2a330942006-05-25 00:59:30 +00001748 Chain = DAG.getCopyFromReg(Chain, X86::XMM0, RetVT, InFlag).getValue(1);
1749 ResultVals.push_back(Chain.getValue(0));
1750 NodeTys.push_back(RetVT);
1751 break;
1752 case MVT::f32:
1753 case MVT::f64: {
1754 std::vector<MVT::ValueType> Tys;
1755 Tys.push_back(MVT::f64);
1756 Tys.push_back(MVT::Other);
1757 Tys.push_back(MVT::Flag);
1758 std::vector<SDOperand> Ops;
1759 Ops.push_back(Chain);
1760 Ops.push_back(InFlag);
Chris Lattnerc24a1d32006-08-08 02:23:42 +00001761 SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys,
1762 &Ops[0], Ops.size());
Evan Cheng2a330942006-05-25 00:59:30 +00001763 Chain = RetVal.getValue(1);
1764 InFlag = RetVal.getValue(2);
1765 if (X86ScalarSSE) {
1766 // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
1767 // shouldn't be necessary except that RFP cannot be live across
1768 // multiple blocks. When stackifier is fixed, they can be uncoupled.
1769 MachineFunction &MF = DAG.getMachineFunction();
1770 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
1771 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1772 Tys.clear();
Nate Begeman7e5496d2006-02-17 00:03:04 +00001773 Tys.push_back(MVT::Other);
Evan Cheng2a330942006-05-25 00:59:30 +00001774 Ops.clear();
Nate Begeman7e5496d2006-02-17 00:03:04 +00001775 Ops.push_back(Chain);
Evan Cheng2a330942006-05-25 00:59:30 +00001776 Ops.push_back(RetVal);
1777 Ops.push_back(StackSlot);
1778 Ops.push_back(DAG.getValueType(RetVT));
Nate Begeman7e5496d2006-02-17 00:03:04 +00001779 Ops.push_back(InFlag);
Chris Lattnerc24a1d32006-08-08 02:23:42 +00001780 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
Evan Cheng2a330942006-05-25 00:59:30 +00001781 RetVal = DAG.getLoad(RetVT, Chain, StackSlot,
1782 DAG.getSrcValue(NULL));
1783 Chain = RetVal.getValue(1);
1784 }
Evan Cheng172fce72006-01-06 00:43:03 +00001785
Evan Cheng2a330942006-05-25 00:59:30 +00001786 if (RetVT == MVT::f32 && !X86ScalarSSE)
1787 // FIXME: we would really like to remember that this FP_ROUND
1788 // operation is okay to eliminate if we allow excess FP precision.
1789 RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
1790 ResultVals.push_back(RetVal);
1791 NodeTys.push_back(RetVT);
1792 break;
1793 }
Chris Lattner76ac0682005-11-15 00:40:23 +00001794 }
Nate Begeman7e5496d2006-02-17 00:03:04 +00001795
Evan Cheng2a330942006-05-25 00:59:30 +00001796
1797 // If the function returns void, just return the chain.
1798 if (ResultVals.empty())
1799 return Chain;
1800
1801 // Otherwise, merge everything together with a MERGE_VALUES node.
1802 NodeTys.push_back(MVT::Other);
1803 ResultVals.push_back(Chain);
Chris Lattnerc24a1d32006-08-08 02:23:42 +00001804 SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
1805 &ResultVals[0], ResultVals.size());
Evan Cheng2a330942006-05-25 00:59:30 +00001806 return Res.getValue(Op.ResNo);
Chris Lattner76ac0682005-11-15 00:40:23 +00001807}
1808
1809SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
1810 if (ReturnAddrIndex == 0) {
1811 // Set up a frame object for the return address.
1812 MachineFunction &MF = DAG.getMachineFunction();
Evan Cheng11b0a5d2006-09-08 06:48:29 +00001813 if (Subtarget->is64Bit())
1814 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
1815 else
1816 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
Chris Lattner76ac0682005-11-15 00:40:23 +00001817 }
1818
Evan Cheng11b0a5d2006-09-08 06:48:29 +00001819 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
Chris Lattner76ac0682005-11-15 00:40:23 +00001820}
1821
1822
1823
1824std::pair<SDOperand, SDOperand> X86TargetLowering::
1825LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
1826 SelectionDAG &DAG) {
1827 SDOperand Result;
1828 if (Depth) // Depths > 0 not supported yet!
1829 Result = DAG.getConstant(0, getPointerTy());
1830 else {
1831 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
1832 if (!isFrameAddress)
1833 // Just load the return address
Evan Cheng11b0a5d2006-09-08 06:48:29 +00001834 Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI,
Chris Lattner76ac0682005-11-15 00:40:23 +00001835 DAG.getSrcValue(NULL));
1836 else
Evan Cheng11b0a5d2006-09-08 06:48:29 +00001837 Result = DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI,
1838 DAG.getConstant(4, getPointerTy()));
Chris Lattner76ac0682005-11-15 00:40:23 +00001839 }
1840 return std::make_pair(Result, Chain);
1841}
1842
Evan Cheng339edad2006-01-11 00:33:36 +00001843/// getCondBrOpcodeForX86CC - Returns the X86 conditional branch opcode
1844/// which corresponds to the condition code.
1845static unsigned getCondBrOpcodeForX86CC(unsigned X86CC) {
1846 switch (X86CC) {
1847 default: assert(0 && "Unknown X86 conditional code!");
1848 case X86ISD::COND_A: return X86::JA;
1849 case X86ISD::COND_AE: return X86::JAE;
1850 case X86ISD::COND_B: return X86::JB;
1851 case X86ISD::COND_BE: return X86::JBE;
1852 case X86ISD::COND_E: return X86::JE;
1853 case X86ISD::COND_G: return X86::JG;
1854 case X86ISD::COND_GE: return X86::JGE;
1855 case X86ISD::COND_L: return X86::JL;
1856 case X86ISD::COND_LE: return X86::JLE;
1857 case X86ISD::COND_NE: return X86::JNE;
1858 case X86ISD::COND_NO: return X86::JNO;
1859 case X86ISD::COND_NP: return X86::JNP;
1860 case X86ISD::COND_NS: return X86::JNS;
1861 case X86ISD::COND_O: return X86::JO;
1862 case X86ISD::COND_P: return X86::JP;
1863 case X86ISD::COND_S: return X86::JS;
1864 }
1865}
Chris Lattner76ac0682005-11-15 00:40:23 +00001866
Evan Cheng45df7f82006-01-30 23:41:35 +00001867/// translateX86CC - Do a one-to-one translation of an ISD::CondCode to the X86
 1868/// specific condition code. It returns false if it cannot do a direct
Chris Lattner7a627672006-09-13 03:22:10 +00001869/// translation. X86CC is the translated CondCode. LHS/RHS are modified as
1870/// needed.
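/// For example, an integer SETGT maps directly to COND_G, while a floating
/// point SETOLT has no direct mapping: the operands are swapped and COND_A
/// ("unsigned above") is used, since FP compares set the flags like unsigned
/// compares (see the flag table below).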
Evan Cheng78038292006-04-05 23:38:46 +00001871static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
Chris Lattner7a627672006-09-13 03:22:10 +00001872 unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
1873 SelectionDAG &DAG) {
Evan Cheng45df7f82006-01-30 23:41:35 +00001874 X86CC = X86ISD::COND_INVALID;
Evan Cheng172fce72006-01-06 00:43:03 +00001875 if (!isFP) {
Chris Lattner971e3392006-09-13 17:04:54 +00001876 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
1877 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
1878 // X > -1 -> X == 0, jump !sign.
1879 RHS = DAG.getConstant(0, RHS.getValueType());
1880 X86CC = X86ISD::COND_NS;
1881 return true;
1882 } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
1883 // X < 0 -> X == 0, jump on sign.
1884 X86CC = X86ISD::COND_S;
1885 return true;
1886 }
Chris Lattner7a627672006-09-13 03:22:10 +00001887 }
1888
Evan Cheng172fce72006-01-06 00:43:03 +00001889 switch (SetCCOpcode) {
1890 default: break;
1891 case ISD::SETEQ: X86CC = X86ISD::COND_E; break;
1892 case ISD::SETGT: X86CC = X86ISD::COND_G; break;
1893 case ISD::SETGE: X86CC = X86ISD::COND_GE; break;
1894 case ISD::SETLT: X86CC = X86ISD::COND_L; break;
1895 case ISD::SETLE: X86CC = X86ISD::COND_LE; break;
1896 case ISD::SETNE: X86CC = X86ISD::COND_NE; break;
1897 case ISD::SETULT: X86CC = X86ISD::COND_B; break;
1898 case ISD::SETUGT: X86CC = X86ISD::COND_A; break;
1899 case ISD::SETULE: X86CC = X86ISD::COND_BE; break;
1900 case ISD::SETUGE: X86CC = X86ISD::COND_AE; break;
1901 }
1902 } else {
1903 // On a floating point condition, the flags are set as follows:
1904 // ZF PF CF op
1905 // 0 | 0 | 0 | X > Y
1906 // 0 | 0 | 1 | X < Y
1907 // 1 | 0 | 0 | X == Y
1908 // 1 | 1 | 1 | unordered
Chris Lattner7a627672006-09-13 03:22:10 +00001909 bool Flip = false;
Evan Cheng172fce72006-01-06 00:43:03 +00001910 switch (SetCCOpcode) {
1911 default: break;
1912 case ISD::SETUEQ:
1913 case ISD::SETEQ: X86CC = X86ISD::COND_E; break;
Evan Chengb3b41c42006-04-17 07:24:10 +00001914 case ISD::SETOLT: Flip = true; // Fallthrough
Evan Cheng172fce72006-01-06 00:43:03 +00001915 case ISD::SETOGT:
1916 case ISD::SETGT: X86CC = X86ISD::COND_A; break;
Evan Chengb3b41c42006-04-17 07:24:10 +00001917 case ISD::SETOLE: Flip = true; // Fallthrough
Evan Cheng172fce72006-01-06 00:43:03 +00001918 case ISD::SETOGE:
1919 case ISD::SETGE: X86CC = X86ISD::COND_AE; break;
Evan Chengb3b41c42006-04-17 07:24:10 +00001920 case ISD::SETUGT: Flip = true; // Fallthrough
Evan Cheng172fce72006-01-06 00:43:03 +00001921 case ISD::SETULT:
1922 case ISD::SETLT: X86CC = X86ISD::COND_B; break;
Evan Chengb3b41c42006-04-17 07:24:10 +00001923 case ISD::SETUGE: Flip = true; // Fallthrough
Evan Cheng172fce72006-01-06 00:43:03 +00001924 case ISD::SETULE:
1925 case ISD::SETLE: X86CC = X86ISD::COND_BE; break;
1926 case ISD::SETONE:
1927 case ISD::SETNE: X86CC = X86ISD::COND_NE; break;
1928 case ISD::SETUO: X86CC = X86ISD::COND_P; break;
1929 case ISD::SETO: X86CC = X86ISD::COND_NP; break;
1930 }
Chris Lattner7a627672006-09-13 03:22:10 +00001931 if (Flip)
1932 std::swap(LHS, RHS);
Evan Cheng172fce72006-01-06 00:43:03 +00001933 }
Evan Cheng45df7f82006-01-30 23:41:35 +00001934
1935 return X86CC != X86ISD::COND_INVALID;
Evan Cheng172fce72006-01-06 00:43:03 +00001936}
1937
Evan Cheng339edad2006-01-11 00:33:36 +00001938/// hasFPCMov - Return true if there is a floating point cmov for the specific
 1939/// X86 condition code. The current x86 ISA includes these FP cmov instructions:
Evan Cheng73a1ad92006-01-10 20:26:56 +00001940/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
Evan Cheng339edad2006-01-11 00:33:36 +00001941static bool hasFPCMov(unsigned X86CC) {
Evan Cheng73a1ad92006-01-10 20:26:56 +00001942 switch (X86CC) {
1943 default:
1944 return false;
1945 case X86ISD::COND_B:
1946 case X86ISD::COND_BE:
1947 case X86ISD::COND_E:
1948 case X86ISD::COND_P:
1949 case X86ISD::COND_A:
1950 case X86ISD::COND_AE:
1951 case X86ISD::COND_NE:
1952 case X86ISD::COND_NP:
1953 return true;
1954 }
1955}
1956
Evan Chengaf598d22006-03-13 23:18:16 +00001957/// DarwinGVRequiresExtraLoad - true if accessing the GV requires an extra
1958/// load. For Darwin, external and weak symbols are indirect, loading the value
 1959/// at address GV rather than the value of GV itself. This means that the
1960/// GlobalAddress must be in the base or index register of the address, not the
1961/// GV offset field.
1962static bool DarwinGVRequiresExtraLoad(GlobalValue *GV) {
1963 return (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||
1964 (GV->isExternal() && !GV->hasNotBeenReadFromBytecode()));
1965}
1966
Evan Chengc995b452006-04-06 23:23:56 +00001967/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return
Evan Chengac847262006-04-07 21:53:05 +00001968/// true if Op is undef or if its value falls within the specified range [Low, Hi).
Evan Chengc995b452006-04-06 23:23:56 +00001969static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
1970 if (Op.getOpcode() == ISD::UNDEF)
1971 return true;
1972
1973 unsigned Val = cast<ConstantSDNode>(Op)->getValue();
Evan Chengac847262006-04-07 21:53:05 +00001974 return (Val >= Low && Val < Hi);
1975}
1976
1977/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return
1978/// true if Op is undef or if its value equal to the specified value.
1979static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
1980 if (Op.getOpcode() == ISD::UNDEF)
1981 return true;
1982 return cast<ConstantSDNode>(Op)->getValue() == Val;
Evan Chengc995b452006-04-06 23:23:56 +00001983}
1984
Evan Cheng68ad48b2006-03-22 18:59:22 +00001985/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
1986/// specifies a shuffle of elements that is suitable for input to PSHUFD.
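/// E.g. <2, 1, 0, 3> is a valid PSHUFD mask, while <0, 1, 4, 5> is not, since
/// elements 4 and 5 would come from the second vector.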
1987bool X86::isPSHUFDMask(SDNode *N) {
1988 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1989
1990 if (N->getNumOperands() != 4)
1991 return false;
1992
1993 // Check if the value doesn't reference the second vector.
Evan Chengb7fedff2006-03-29 23:07:14 +00001994 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
Evan Cheng99d72052006-03-31 00:30:29 +00001995 SDOperand Arg = N->getOperand(i);
1996 if (Arg.getOpcode() == ISD::UNDEF) continue;
1997 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
1998 if (cast<ConstantSDNode>(Arg)->getValue() >= 4)
Evan Chengb7fedff2006-03-29 23:07:14 +00001999 return false;
2000 }
2001
2002 return true;
2003}
2004
2005/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
Evan Cheng59a63552006-04-05 01:47:37 +00002006/// specifies a shuffle of elements that is suitable for input to PSHUFHW.
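/// E.g. <0, 1, 2, 3, 7, 6, 5, 4> is a valid PSHUFHW mask: the low quadword is
/// copied in order and only elements 4-7 are permuted.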
Evan Chengb7fedff2006-03-29 23:07:14 +00002007bool X86::isPSHUFHWMask(SDNode *N) {
2008 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2009
2010 if (N->getNumOperands() != 8)
2011 return false;
2012
2013 // Lower quadword copied in order.
2014 for (unsigned i = 0; i != 4; ++i) {
Evan Cheng99d72052006-03-31 00:30:29 +00002015 SDOperand Arg = N->getOperand(i);
2016 if (Arg.getOpcode() == ISD::UNDEF) continue;
2017 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2018 if (cast<ConstantSDNode>(Arg)->getValue() != i)
Evan Chengb7fedff2006-03-29 23:07:14 +00002019 return false;
2020 }
2021
2022 // Upper quadword shuffled.
2023 for (unsigned i = 4; i != 8; ++i) {
Evan Cheng99d72052006-03-31 00:30:29 +00002024 SDOperand Arg = N->getOperand(i);
2025 if (Arg.getOpcode() == ISD::UNDEF) continue;
2026 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2027 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
Evan Chengb7fedff2006-03-29 23:07:14 +00002028 if (Val < 4 || Val > 7)
2029 return false;
2030 }
2031
2032 return true;
2033}
2034
2035/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
Evan Cheng59a63552006-04-05 01:47:37 +00002036/// specifies a shuffle of elements that is suitable for input to PSHUFLW.
Evan Chengb7fedff2006-03-29 23:07:14 +00002037bool X86::isPSHUFLWMask(SDNode *N) {
2038 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2039
2040 if (N->getNumOperands() != 8)
2041 return false;
2042
2043 // Upper quadword copied in order.
Evan Chengac847262006-04-07 21:53:05 +00002044 for (unsigned i = 4; i != 8; ++i)
2045 if (!isUndefOrEqual(N->getOperand(i), i))
Evan Chengb7fedff2006-03-29 23:07:14 +00002046 return false;
Evan Chengb7fedff2006-03-29 23:07:14 +00002047
2048 // Lower quadword shuffled.
Evan Chengac847262006-04-07 21:53:05 +00002049 for (unsigned i = 0; i != 4; ++i)
2050 if (!isUndefOrInRange(N->getOperand(i), 0, 4))
Evan Chengb7fedff2006-03-29 23:07:14 +00002051 return false;
Evan Cheng68ad48b2006-03-22 18:59:22 +00002052
2053 return true;
2054}
2055
Evan Chengd27fb3e2006-03-24 01:18:28 +00002056/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
2057/// specifies a shuffle of elements that is suitable for input to SHUFP*.
Evan Cheng60f0b892006-04-20 08:58:49 +00002058static bool isSHUFPMask(std::vector<SDOperand> &N) {
2059 unsigned NumElems = N.size();
2060 if (NumElems != 2 && NumElems != 4) return false;
Evan Chengd27fb3e2006-03-24 01:18:28 +00002061
Evan Cheng60f0b892006-04-20 08:58:49 +00002062 unsigned Half = NumElems / 2;
2063 for (unsigned i = 0; i < Half; ++i)
2064 if (!isUndefOrInRange(N[i], 0, NumElems))
2065 return false;
2066 for (unsigned i = Half; i < NumElems; ++i)
2067 if (!isUndefOrInRange(N[i], NumElems, NumElems*2))
2068 return false;
Evan Chengd27fb3e2006-03-24 01:18:28 +00002069
2070 return true;
2071}
2072
Evan Cheng60f0b892006-04-20 08:58:49 +00002073bool X86::isSHUFPMask(SDNode *N) {
2074 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2075 std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
2076 return ::isSHUFPMask(Ops);
2077}
2078
 2079/// isCommutedSHUFP - Returns true if the shuffle mask is exactly
 2080/// the reverse of what x86 shuffles want. x86 shuffles require the lower
2081/// half elements to come from vector 1 (which would equal the dest.) and
2082/// the upper half to come from vector 2.
2083static bool isCommutedSHUFP(std::vector<SDOperand> &Ops) {
2084 unsigned NumElems = Ops.size();
2085 if (NumElems != 2 && NumElems != 4) return false;
2086
2087 unsigned Half = NumElems / 2;
2088 for (unsigned i = 0; i < Half; ++i)
2089 if (!isUndefOrInRange(Ops[i], NumElems, NumElems*2))
2090 return false;
2091 for (unsigned i = Half; i < NumElems; ++i)
2092 if (!isUndefOrInRange(Ops[i], 0, NumElems))
2093 return false;
2094 return true;
2095}
2096
2097static bool isCommutedSHUFP(SDNode *N) {
2098 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2099 std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
2100 return isCommutedSHUFP(Ops);
2101}
2102
Evan Cheng2595a682006-03-24 02:58:06 +00002103/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
2104/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
2105bool X86::isMOVHLPSMask(SDNode *N) {
2106 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2107
Evan Cheng1a194a52006-03-28 06:50:32 +00002108 if (N->getNumOperands() != 4)
Evan Cheng2595a682006-03-24 02:58:06 +00002109 return false;
2110
Evan Cheng1a194a52006-03-28 06:50:32 +00002111 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
Evan Chengac847262006-04-07 21:53:05 +00002112 return isUndefOrEqual(N->getOperand(0), 6) &&
2113 isUndefOrEqual(N->getOperand(1), 7) &&
2114 isUndefOrEqual(N->getOperand(2), 2) &&
2115 isUndefOrEqual(N->getOperand(3), 3);
Evan Cheng1a194a52006-03-28 06:50:32 +00002116}
2117
Evan Chengc995b452006-04-06 23:23:56 +00002118/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
2119/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
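/// E.g. the 4-element mask <4, 5, 2, 3> is a MOVLPS-style mask: the low half
/// comes from the second vector and the high half stays in place.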
2120bool X86::isMOVLPMask(SDNode *N) {
2121 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2122
2123 unsigned NumElems = N->getNumOperands();
2124 if (NumElems != 2 && NumElems != 4)
2125 return false;
2126
Evan Chengac847262006-04-07 21:53:05 +00002127 for (unsigned i = 0; i < NumElems/2; ++i)
2128 if (!isUndefOrEqual(N->getOperand(i), i + NumElems))
2129 return false;
Evan Chengc995b452006-04-06 23:23:56 +00002130
Evan Chengac847262006-04-07 21:53:05 +00002131 for (unsigned i = NumElems/2; i < NumElems; ++i)
2132 if (!isUndefOrEqual(N->getOperand(i), i))
2133 return false;
Evan Chengc995b452006-04-06 23:23:56 +00002134
2135 return true;
2136}
2137
2138/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
Evan Cheng7855e4d2006-04-19 20:35:22 +00002139/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
2140/// and MOVLHPS.
Evan Chengc995b452006-04-06 23:23:56 +00002141bool X86::isMOVHPMask(SDNode *N) {
2142 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2143
2144 unsigned NumElems = N->getNumOperands();
2145 if (NumElems != 2 && NumElems != 4)
2146 return false;
2147
Evan Chengac847262006-04-07 21:53:05 +00002148 for (unsigned i = 0; i < NumElems/2; ++i)
2149 if (!isUndefOrEqual(N->getOperand(i), i))
2150 return false;
Evan Chengc995b452006-04-06 23:23:56 +00002151
2152 for (unsigned i = 0; i < NumElems/2; ++i) {
2153 SDOperand Arg = N->getOperand(i + NumElems/2);
Evan Chengac847262006-04-07 21:53:05 +00002154 if (!isUndefOrEqual(Arg, i + NumElems))
2155 return false;
Evan Chengc995b452006-04-06 23:23:56 +00002156 }
2157
2158 return true;
2159}
2160
Evan Cheng5df75882006-03-28 00:39:58 +00002161/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
2162/// specifies a shuffle of elements that is suitable for input to UNPCKL.
Evan Cheng60f0b892006-04-20 08:58:49 +00002163bool static isUNPCKLMask(std::vector<SDOperand> &N, bool V2IsSplat = false) {
2164 unsigned NumElems = N.size();
Evan Cheng5df75882006-03-28 00:39:58 +00002165 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
2166 return false;
2167
2168 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
Evan Cheng60f0b892006-04-20 08:58:49 +00002169 SDOperand BitI = N[i];
2170 SDOperand BitI1 = N[i+1];
Evan Chengac847262006-04-07 21:53:05 +00002171 if (!isUndefOrEqual(BitI, j))
2172 return false;
Evan Cheng60f0b892006-04-20 08:58:49 +00002173 if (V2IsSplat) {
2174 if (isUndefOrEqual(BitI1, NumElems))
2175 return false;
2176 } else {
2177 if (!isUndefOrEqual(BitI1, j + NumElems))
2178 return false;
2179 }
Evan Cheng5df75882006-03-28 00:39:58 +00002180 }
2181
2182 return true;
2183}
2184
Evan Cheng60f0b892006-04-20 08:58:49 +00002185bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) {
2186 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2187 std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
2188 return ::isUNPCKLMask(Ops, V2IsSplat);
2189}
2190
Evan Cheng2bc32802006-03-28 02:43:26 +00002191/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
2192/// specifies a shuffle of elements that is suitable for input to UNPCKH.
Evan Cheng60f0b892006-04-20 08:58:49 +00002193bool static isUNPCKHMask(std::vector<SDOperand> &N, bool V2IsSplat = false) {
2194 unsigned NumElems = N.size();
Evan Cheng2bc32802006-03-28 02:43:26 +00002195 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
2196 return false;
2197
2198 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
Evan Cheng60f0b892006-04-20 08:58:49 +00002199 SDOperand BitI = N[i];
2200 SDOperand BitI1 = N[i+1];
Evan Chengac847262006-04-07 21:53:05 +00002201 if (!isUndefOrEqual(BitI, j + NumElems/2))
2202 return false;
Evan Cheng60f0b892006-04-20 08:58:49 +00002203 if (V2IsSplat) {
2204 if (isUndefOrEqual(BitI1, NumElems))
2205 return false;
2206 } else {
2207 if (!isUndefOrEqual(BitI1, j + NumElems/2 + NumElems))
2208 return false;
2209 }
Evan Cheng2bc32802006-03-28 02:43:26 +00002210 }
2211
2212 return true;
2213}
2214
Evan Cheng60f0b892006-04-20 08:58:49 +00002215bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) {
2216 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2217 std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
2218 return ::isUNPCKHMask(Ops, V2IsSplat);
2219}
2220
Evan Chengf3b52c82006-04-05 07:20:06 +00002221/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
2222/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
2223/// <0, 0, 1, 1>
2224bool X86::isUNPCKL_v_undef_Mask(SDNode *N) {
2225 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2226
2227 unsigned NumElems = N->getNumOperands();
2228 if (NumElems != 4 && NumElems != 8 && NumElems != 16)
2229 return false;
2230
2231 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
2232 SDOperand BitI = N->getOperand(i);
2233 SDOperand BitI1 = N->getOperand(i+1);
2234
Evan Chengac847262006-04-07 21:53:05 +00002235 if (!isUndefOrEqual(BitI, j))
2236 return false;
2237 if (!isUndefOrEqual(BitI1, j))
2238 return false;
Evan Chengf3b52c82006-04-05 07:20:06 +00002239 }
2240
2241 return true;
2242}
2243
Evan Chenge8b51802006-04-21 01:05:10 +00002244/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
2245/// specifies a shuffle of elements that is suitable for input to MOVSS,
2246/// MOVSD, and MOVD, i.e. setting the lowest element.
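/// E.g. <4, 1, 2, 3> for four elements: element 0 comes from the second
/// vector and the remaining elements pass through from the first, matching
/// MOVSS semantics.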
2247static bool isMOVLMask(std::vector<SDOperand> &N) {
Evan Cheng60f0b892006-04-20 08:58:49 +00002248 unsigned NumElems = N.size();
Evan Chenge8b51802006-04-21 01:05:10 +00002249 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
Evan Cheng12ba3e22006-04-11 00:19:04 +00002250 return false;
2251
Evan Cheng60f0b892006-04-20 08:58:49 +00002252 if (!isUndefOrEqual(N[0], NumElems))
Evan Cheng12ba3e22006-04-11 00:19:04 +00002253 return false;
2254
2255 for (unsigned i = 1; i < NumElems; ++i) {
Evan Cheng60f0b892006-04-20 08:58:49 +00002256 SDOperand Arg = N[i];
Evan Cheng12ba3e22006-04-11 00:19:04 +00002257 if (!isUndefOrEqual(Arg, i))
2258 return false;
2259 }
2260
2261 return true;
2262}
Evan Chengf3b52c82006-04-05 07:20:06 +00002263
Evan Chenge8b51802006-04-21 01:05:10 +00002264bool X86::isMOVLMask(SDNode *N) {
Evan Cheng60f0b892006-04-20 08:58:49 +00002265 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2266 std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
Evan Chenge8b51802006-04-21 01:05:10 +00002267 return ::isMOVLMask(Ops);
Evan Cheng60f0b892006-04-20 08:58:49 +00002268}
2269
Evan Chenge8b51802006-04-21 01:05:10 +00002270/// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse
 2271/// of what x86 MOVSS wants: the lowest element must be the lowest element of
Evan Cheng60f0b892006-04-20 08:58:49 +00002272/// vector 2 and the other elements must come from vector 1 in order.
Evan Cheng89c5d042006-09-08 01:50:06 +00002273static bool isCommutedMOVL(std::vector<SDOperand> &Ops, bool V2IsSplat = false,
2274 bool V2IsUndef = false) {
Evan Cheng60f0b892006-04-20 08:58:49 +00002275 unsigned NumElems = Ops.size();
Evan Chenge8b51802006-04-21 01:05:10 +00002276 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
Evan Cheng60f0b892006-04-20 08:58:49 +00002277 return false;
2278
2279 if (!isUndefOrEqual(Ops[0], 0))
2280 return false;
2281
2282 for (unsigned i = 1; i < NumElems; ++i) {
2283 SDOperand Arg = Ops[i];
Evan Cheng89c5d042006-09-08 01:50:06 +00002284 if (!(isUndefOrEqual(Arg, i+NumElems) ||
2285 (V2IsUndef && isUndefOrInRange(Arg, NumElems, NumElems*2)) ||
2286 (V2IsSplat && isUndefOrEqual(Arg, NumElems))))
2287 return false;
Evan Cheng60f0b892006-04-20 08:58:49 +00002288 }
2289
2290 return true;
2291}
2292
Evan Cheng89c5d042006-09-08 01:50:06 +00002293static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
2294 bool V2IsUndef = false) {
Evan Cheng60f0b892006-04-20 08:58:49 +00002295 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2296 std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
Evan Cheng89c5d042006-09-08 01:50:06 +00002297 return isCommutedMOVL(Ops, V2IsSplat, V2IsUndef);
Evan Cheng60f0b892006-04-20 08:58:49 +00002298}
2299
Evan Cheng5d247f82006-04-14 21:59:03 +00002300/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2301/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
2302bool X86::isMOVSHDUPMask(SDNode *N) {
2303 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2304
2305 if (N->getNumOperands() != 4)
2306 return false;
2307
2308 // Expect 1, 1, 3, 3
2309 for (unsigned i = 0; i < 2; ++i) {
2310 SDOperand Arg = N->getOperand(i);
2311 if (Arg.getOpcode() == ISD::UNDEF) continue;
2312 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2313 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2314 if (Val != 1) return false;
2315 }
Evan Cheng6222cf22006-04-15 05:37:34 +00002316
2317 bool HasHi = false;
Evan Cheng5d247f82006-04-14 21:59:03 +00002318 for (unsigned i = 2; i < 4; ++i) {
2319 SDOperand Arg = N->getOperand(i);
2320 if (Arg.getOpcode() == ISD::UNDEF) continue;
2321 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2322 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2323 if (Val != 3) return false;
Evan Cheng6222cf22006-04-15 05:37:34 +00002324 HasHi = true;
Evan Cheng5d247f82006-04-14 21:59:03 +00002325 }
Evan Cheng65bb7202006-04-15 03:13:24 +00002326
Evan Cheng6222cf22006-04-15 05:37:34 +00002327 // Don't use movshdup if it can be done with a shufps.
2328 return HasHi;
Evan Cheng5d247f82006-04-14 21:59:03 +00002329}
2330
2331/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2332/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
2333bool X86::isMOVSLDUPMask(SDNode *N) {
2334 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2335
2336 if (N->getNumOperands() != 4)
2337 return false;
2338
2339 // Expect 0, 0, 2, 2
2340 for (unsigned i = 0; i < 2; ++i) {
2341 SDOperand Arg = N->getOperand(i);
2342 if (Arg.getOpcode() == ISD::UNDEF) continue;
2343 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2344 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2345 if (Val != 0) return false;
2346 }
Evan Cheng6222cf22006-04-15 05:37:34 +00002347
2348 bool HasHi = false;
Evan Cheng5d247f82006-04-14 21:59:03 +00002349 for (unsigned i = 2; i < 4; ++i) {
2350 SDOperand Arg = N->getOperand(i);
2351 if (Arg.getOpcode() == ISD::UNDEF) continue;
2352 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2353 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2354 if (Val != 2) return false;
Evan Cheng6222cf22006-04-15 05:37:34 +00002355 HasHi = true;
Evan Cheng5d247f82006-04-14 21:59:03 +00002356 }
Evan Cheng65bb7202006-04-15 03:13:24 +00002357
Evan Cheng6222cf22006-04-15 05:37:34 +00002358  // Don't use movsldup if it can be done with a shufps.
2359 return HasHi;
Evan Cheng5d247f82006-04-14 21:59:03 +00002360}
2361
Evan Chengd097e672006-03-22 02:53:00 +00002362/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
2363/// a splat of a single element.
Evan Cheng5022b342006-04-17 20:43:08 +00002364static bool isSplatMask(SDNode *N) {
Evan Chengd097e672006-03-22 02:53:00 +00002365 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2366
Evan Chengd097e672006-03-22 02:53:00 +00002367 // This is a splat operation if each element of the permute is the same, and
2368 // if the value doesn't reference the second vector.
Evan Cheng4a1b0d32006-04-19 23:28:59 +00002369 unsigned NumElems = N->getNumOperands();
2370 SDOperand ElementBase;
2371 unsigned i = 0;
2372 for (; i != NumElems; ++i) {
2373 SDOperand Elt = N->getOperand(i);
2374 if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt)) {
2375 ElementBase = Elt;
2376 break;
2377 }
2378 }
2379
2380 if (!ElementBase.Val)
2381 return false;
2382
2383 for (; i != NumElems; ++i) {
Evan Cheng99d72052006-03-31 00:30:29 +00002384 SDOperand Arg = N->getOperand(i);
2385 if (Arg.getOpcode() == ISD::UNDEF) continue;
2386 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
Evan Cheng4a1b0d32006-04-19 23:28:59 +00002387 if (Arg != ElementBase) return false;
Evan Chengd097e672006-03-22 02:53:00 +00002388 }
2389
2390 // Make sure it is a splat of the first vector operand.
Evan Cheng4a1b0d32006-04-19 23:28:59 +00002391 return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems;
Evan Chengd097e672006-03-22 02:53:00 +00002392}
2393
Evan Cheng5022b342006-04-17 20:43:08 +00002394/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
2395/// a splat of a single element and it's a 2 or 4 element mask.
2396bool X86::isSplatMask(SDNode *N) {
2397 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2398
Evan Cheng4a1b0d32006-04-19 23:28:59 +00002399  // We can only splat 64-bit and 32-bit quantities with a single instruction.
Evan Cheng5022b342006-04-17 20:43:08 +00002400 if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
2401 return false;
2402 return ::isSplatMask(N);
2403}
2404
Evan Cheng8fdbdf22006-03-22 08:01:21 +00002405/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
2406/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
2407/// instructions.
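/// For example, the 4-element identity mask <0,1,2,3> encodes as 0xE4
/// (0b11100100) and the reversal mask <3,2,1,0> as 0x1B: each result
/// position gets a 2-bit field, built below from the last mask operand
/// down to the first.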
2408unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
Evan Chengd097e672006-03-22 02:53:00 +00002409 unsigned NumOperands = N->getNumOperands();
2410 unsigned Shift = (NumOperands == 4) ? 2 : 1;
2411 unsigned Mask = 0;
Evan Cheng8160fd32006-03-28 23:41:33 +00002412 for (unsigned i = 0; i < NumOperands; ++i) {
Evan Cheng99d72052006-03-31 00:30:29 +00002413 unsigned Val = 0;
2414 SDOperand Arg = N->getOperand(NumOperands-i-1);
2415 if (Arg.getOpcode() != ISD::UNDEF)
2416 Val = cast<ConstantSDNode>(Arg)->getValue();
Evan Chengd27fb3e2006-03-24 01:18:28 +00002417 if (Val >= NumOperands) Val -= NumOperands;
Evan Cheng8fdbdf22006-03-22 08:01:21 +00002418 Mask |= Val;
Evan Cheng8160fd32006-03-28 23:41:33 +00002419 if (i != NumOperands - 1)
2420 Mask <<= Shift;
2421 }
Evan Cheng8fdbdf22006-03-22 08:01:21 +00002422
2423 return Mask;
2424}
2425
Evan Chengb7fedff2006-03-29 23:07:14 +00002426/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
2427/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
2428/// instructions.
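/// For example, an identity upper half <...,4,5,6,7> yields 0xE4 and
/// <...,5,4,7,6> (swap adjacent pairs) yields 0xB1; only mask elements 4-7
/// contribute, each biased down by 4.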
2429unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
2430 unsigned Mask = 0;
2431 // 8 nodes, but we only care about the last 4.
2432 for (unsigned i = 7; i >= 4; --i) {
Evan Cheng99d72052006-03-31 00:30:29 +00002433 unsigned Val = 0;
2434 SDOperand Arg = N->getOperand(i);
2435 if (Arg.getOpcode() != ISD::UNDEF)
2436 Val = cast<ConstantSDNode>(Arg)->getValue();
Evan Chengb7fedff2006-03-29 23:07:14 +00002437 Mask |= (Val - 4);
2438 if (i != 4)
2439 Mask <<= 2;
2440 }
2441
2442 return Mask;
2443}
2444
2445/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
2446/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
2447/// instructions.
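/// For example, a lower half of <1,0,3,2,...> (swap adjacent pairs) yields
/// 0xB1; only mask elements 0-3 contribute.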
2448unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
2449 unsigned Mask = 0;
2450 // 8 nodes, but we only care about the first 4.
2451 for (int i = 3; i >= 0; --i) {
Evan Cheng99d72052006-03-31 00:30:29 +00002452 unsigned Val = 0;
2453 SDOperand Arg = N->getOperand(i);
2454 if (Arg.getOpcode() != ISD::UNDEF)
2455 Val = cast<ConstantSDNode>(Arg)->getValue();
Evan Chengb7fedff2006-03-29 23:07:14 +00002456 Mask |= Val;
2457 if (i != 0)
2458 Mask <<= 2;
2459 }
2460
2461 return Mask;
2462}
2463
Evan Cheng59a63552006-04-05 01:47:37 +00002464/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
2465/// specifies an 8 element shuffle that can be broken into a pair of
2466/// PSHUFHW and PSHUFLW.
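/// For example, <2,0,3,1, 7,5,6,4> qualifies: every element stays within its
/// own quadword, so it can be done as a PSHUFLW followed by a PSHUFHW.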
2467static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
2468 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2469
2470 if (N->getNumOperands() != 8)
2471 return false;
2472
2473 // Lower quadword shuffled.
2474 for (unsigned i = 0; i != 4; ++i) {
2475 SDOperand Arg = N->getOperand(i);
2476 if (Arg.getOpcode() == ISD::UNDEF) continue;
2477 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2478 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2479    if (Val >= 4)
2480 return false;
2481 }
2482
2483 // Upper quadword shuffled.
2484 for (unsigned i = 4; i != 8; ++i) {
2485 SDOperand Arg = N->getOperand(i);
2486 if (Arg.getOpcode() == ISD::UNDEF) continue;
2487 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2488 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2489 if (Val < 4 || Val > 7)
2490 return false;
2491 }
2492
2493 return true;
2494}
2495
Evan Chengc995b452006-04-06 23:23:56 +00002496/// CommuteVectorShuffle - Swap vector_shuffle operands as well as
2497/// values in their permute mask.
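/// For example, shuffle(V1, V2, <0,1,4,5>) becomes the equivalent
/// shuffle(V2, V1, <4,5,0,1>): indices below NumElems are re-pointed at the
/// new second operand and vice versa.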
2498static SDOperand CommuteVectorShuffle(SDOperand Op, SelectionDAG &DAG) {
2499 SDOperand V1 = Op.getOperand(0);
2500 SDOperand V2 = Op.getOperand(1);
2501 SDOperand Mask = Op.getOperand(2);
2502 MVT::ValueType VT = Op.getValueType();
2503 MVT::ValueType MaskVT = Mask.getValueType();
2504 MVT::ValueType EltVT = MVT::getVectorBaseType(MaskVT);
2505 unsigned NumElems = Mask.getNumOperands();
2506 std::vector<SDOperand> MaskVec;
2507
2508 for (unsigned i = 0; i != NumElems; ++i) {
2509 SDOperand Arg = Mask.getOperand(i);
Evan Chenga3caaee2006-04-19 22:48:17 +00002510 if (Arg.getOpcode() == ISD::UNDEF) {
2511 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
2512 continue;
2513 }
Evan Chengc995b452006-04-06 23:23:56 +00002514 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2515 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2516 if (Val < NumElems)
2517 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
2518 else
2519 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
2520 }
2521
Chris Lattnerc24a1d32006-08-08 02:23:42 +00002522 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
Evan Chengc995b452006-04-06 23:23:56 +00002523 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V2, V1, Mask);
2524}
2525
Evan Cheng7855e4d2006-04-19 20:35:22 +00002526/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
2527/// match movhlps. The lower half elements should come from the upper half of
2528/// V1 (and in order), and the upper half elements should come from the upper
2529/// half of V2 (and in order).
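/// For a 4-element shuffle this means the mask is <2,3,6,7> (undefs allowed);
/// commuting the operands turns it into the <6,7,2,3> form that movhlps
/// implements.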
2530static bool ShouldXformToMOVHLPS(SDNode *Mask) {
2531 unsigned NumElems = Mask->getNumOperands();
2532 if (NumElems != 4)
2533 return false;
2534 for (unsigned i = 0, e = 2; i != e; ++i)
2535 if (!isUndefOrEqual(Mask->getOperand(i), i+2))
2536 return false;
2537 for (unsigned i = 2; i != 4; ++i)
2538 if (!isUndefOrEqual(Mask->getOperand(i), i+4))
2539 return false;
2540 return true;
2541}
2542
Evan Chengc995b452006-04-06 23:23:56 +00002543/// isScalarLoadToVector - Returns true if the node is a scalar load that
2544/// is promoted to a vector.
Evan Cheng7855e4d2006-04-19 20:35:22 +00002545static inline bool isScalarLoadToVector(SDNode *N) {
2546 if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) {
2547 N = N->getOperand(0).Val;
2548 return (N->getOpcode() == ISD::LOAD);
Evan Chengc995b452006-04-06 23:23:56 +00002549 }
2550 return false;
2551}
2552
Evan Cheng7855e4d2006-04-19 20:35:22 +00002553/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
2554/// match movlp{s|d}. The lower half elements should come from the lower half of
2555/// V1 (and in order), and the upper half elements should come from the upper
2556/// half of V2 (and in order). And since V1 will become the source of the
2557/// MOVLP, it must be either a vector load or a scalar load to vector.
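/// For a 4-element shuffle this means the mask is <0,1,6,7> (undefs allowed);
/// since V1 is the loaded value, commuting the operands yields the
/// movlp{s|d} form where the low half comes from memory and the high half is
/// preserved.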
2558static bool ShouldXformToMOVLP(SDNode *V1, SDNode *Mask) {
2559 if (V1->getOpcode() != ISD::LOAD && !isScalarLoadToVector(V1))
2560 return false;
Evan Chengc995b452006-04-06 23:23:56 +00002561
Evan Cheng7855e4d2006-04-19 20:35:22 +00002562 unsigned NumElems = Mask->getNumOperands();
2563 if (NumElems != 2 && NumElems != 4)
2564 return false;
2565 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
2566 if (!isUndefOrEqual(Mask->getOperand(i), i))
2567 return false;
2568 for (unsigned i = NumElems/2; i != NumElems; ++i)
2569 if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
2570 return false;
2571 return true;
Evan Chengc995b452006-04-06 23:23:56 +00002572}
2573
Evan Cheng60f0b892006-04-20 08:58:49 +00002574/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
2575/// all the same.
2576static bool isSplatVector(SDNode *N) {
2577 if (N->getOpcode() != ISD::BUILD_VECTOR)
2578 return false;
Evan Chengc995b452006-04-06 23:23:56 +00002579
Evan Cheng60f0b892006-04-20 08:58:49 +00002580 SDOperand SplatValue = N->getOperand(0);
2581 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
2582 if (N->getOperand(i) != SplatValue)
Evan Chengc995b452006-04-06 23:23:56 +00002583 return false;
2584 return true;
2585}
2586
Evan Cheng89c5d042006-09-08 01:50:06 +00002587/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
2588/// to an undef.
2589static bool isUndefShuffle(SDNode *N) {
2590  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
2591 return false;
2592
2593 SDOperand V1 = N->getOperand(0);
2594 SDOperand V2 = N->getOperand(1);
2595 SDOperand Mask = N->getOperand(2);
2596 unsigned NumElems = Mask.getNumOperands();
2597 for (unsigned i = 0; i != NumElems; ++i) {
2598 SDOperand Arg = Mask.getOperand(i);
2599 if (Arg.getOpcode() != ISD::UNDEF) {
2600 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2601 if (Val < NumElems && V1.getOpcode() != ISD::UNDEF)
2602 return false;
2603 else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF)
2604 return false;
2605 }
2606 }
2607 return true;
2608}
2609
Evan Cheng60f0b892006-04-20 08:58:49 +00002610/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
2611/// that point to V2 point to its first element.
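/// For example, with 4 elements the mask <0,5,1,7> becomes <0,4,1,4>: any
/// index greater than NumElems is redirected to V2's first element (index
/// NumElems), which is safe because V2 is a splat.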
2612static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) {
2613 assert(Mask.getOpcode() == ISD::BUILD_VECTOR);
2614
2615 bool Changed = false;
2616 std::vector<SDOperand> MaskVec;
2617 unsigned NumElems = Mask.getNumOperands();
2618 for (unsigned i = 0; i != NumElems; ++i) {
2619 SDOperand Arg = Mask.getOperand(i);
2620 if (Arg.getOpcode() != ISD::UNDEF) {
2621 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2622 if (Val > NumElems) {
2623 Arg = DAG.getConstant(NumElems, Arg.getValueType());
2624 Changed = true;
2625 }
2626 }
2627 MaskVec.push_back(Arg);
2628 }
2629
2630 if (Changed)
Chris Lattnerc24a1d32006-08-08 02:23:42 +00002631 Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(),
2632 &MaskVec[0], MaskVec.size());
Evan Cheng60f0b892006-04-20 08:58:49 +00002633 return Mask;
2634}
2635
Evan Chenge8b51802006-04-21 01:05:10 +00002636/// getMOVLMask - Returns a vector_shuffle mask for a movs{s|d} or movd
2637/// operation of specified width.
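/// For example, with 4 elements the mask is <4,1,2,3>: element 0 comes from
/// the second operand and the rest are taken unchanged from the first, which
/// is the movss/movsd pattern.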
2638static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) {
Evan Cheng60f0b892006-04-20 08:58:49 +00002639 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2640 MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
2641
2642 std::vector<SDOperand> MaskVec;
2643 MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
2644 for (unsigned i = 1; i != NumElems; ++i)
2645 MaskVec.push_back(DAG.getConstant(i, BaseVT));
Chris Lattnerc24a1d32006-08-08 02:23:42 +00002646 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
Evan Cheng60f0b892006-04-20 08:58:49 +00002647}
2648
Evan Cheng5022b342006-04-17 20:43:08 +00002649/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
2650/// of specified width.
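/// For example, with 4 elements the mask is <0,4,1,5>, interleaving the low
/// halves of the two operands as unpcklps/punpckl* do.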
2651static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
2652 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2653 MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
2654 std::vector<SDOperand> MaskVec;
2655 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
2656 MaskVec.push_back(DAG.getConstant(i, BaseVT));
2657 MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
2658 }
Chris Lattnerc24a1d32006-08-08 02:23:42 +00002659 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
Evan Cheng5022b342006-04-17 20:43:08 +00002660}
2661
Evan Cheng60f0b892006-04-20 08:58:49 +00002662/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
2663/// of specified width.
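/// For example, with 4 elements the mask is <2,6,3,7>, interleaving the high
/// halves of the two operands.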
2664static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
2665 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2666 MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
2667 unsigned Half = NumElems/2;
2668 std::vector<SDOperand> MaskVec;
2669 for (unsigned i = 0; i != Half; ++i) {
2670 MaskVec.push_back(DAG.getConstant(i + Half, BaseVT));
2671 MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT));
2672 }
Chris Lattnerc24a1d32006-08-08 02:23:42 +00002673 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
Evan Cheng60f0b892006-04-20 08:58:49 +00002674}
2675
Evan Chenge8b51802006-04-21 01:05:10 +00002676/// getZeroVector - Returns a vector of specified type with all zero elements.
2677///
2678static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
2679 assert(MVT::isVector(VT) && "Expected a vector type");
2680 unsigned NumElems = getVectorNumElements(VT);
2681 MVT::ValueType EVT = MVT::getVectorBaseType(VT);
2682 bool isFP = MVT::isFloatingPoint(EVT);
2683 SDOperand Zero = isFP ? DAG.getConstantFP(0.0, EVT) : DAG.getConstant(0, EVT);
2684 std::vector<SDOperand> ZeroVec(NumElems, Zero);
Chris Lattnerc24a1d32006-08-08 02:23:42 +00002685 return DAG.getNode(ISD::BUILD_VECTOR, VT, &ZeroVec[0], ZeroVec.size());
Evan Chenge8b51802006-04-21 01:05:10 +00002686}
2687
Evan Cheng5022b342006-04-17 20:43:08 +00002688/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32.
2689///
2690static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) {
2691 SDOperand V1 = Op.getOperand(0);
Evan Chenge8b51802006-04-21 01:05:10 +00002692 SDOperand Mask = Op.getOperand(2);
Evan Cheng5022b342006-04-17 20:43:08 +00002693 MVT::ValueType VT = Op.getValueType();
Evan Chenge8b51802006-04-21 01:05:10 +00002694 unsigned NumElems = Mask.getNumOperands();
2695 Mask = getUnpacklMask(NumElems, DAG);
Evan Cheng5022b342006-04-17 20:43:08 +00002696 while (NumElems != 4) {
Evan Chenge8b51802006-04-21 01:05:10 +00002697 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);
Evan Cheng5022b342006-04-17 20:43:08 +00002698 NumElems >>= 1;
2699 }
2700 V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1);
2701
2702 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
Evan Chenge8b51802006-04-21 01:05:10 +00002703 Mask = getZeroVector(MaskVT, DAG);
Evan Cheng5022b342006-04-17 20:43:08 +00002704 SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1,
Evan Chenge8b51802006-04-21 01:05:10 +00002705 DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask);
Evan Cheng5022b342006-04-17 20:43:08 +00002706 return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle);
2707}
2708
Evan Chenge8b51802006-04-21 01:05:10 +00002709/// isZeroNode - Returns true if Elt is a constant zero or a floating point
2710/// constant +0.0.
2711static inline bool isZeroNode(SDOperand Elt) {
2712 return ((isa<ConstantSDNode>(Elt) &&
2713 cast<ConstantSDNode>(Elt)->getValue() == 0) ||
2714 (isa<ConstantFPSDNode>(Elt) &&
2715 cast<ConstantFPSDNode>(Elt)->isExactlyValue(0.0)));
2716}
2717
Evan Cheng14215c32006-04-21 23:03:30 +00002718/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
2719/// vector and zero or undef vector.
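/// For example, with NumElems == 4 and Idx == 2 the mask is <0,0,4,0>: lane 2
/// takes element 0 of V2 and every other lane takes element 0 of the zero
/// (or undef) vector V1.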
2720static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT,
Evan Chenge8b51802006-04-21 01:05:10 +00002721 unsigned NumElems, unsigned Idx,
Evan Cheng14215c32006-04-21 23:03:30 +00002722 bool isZero, SelectionDAG &DAG) {
2723 SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT);
Evan Chenge8b51802006-04-21 01:05:10 +00002724 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2725 MVT::ValueType EVT = MVT::getVectorBaseType(MaskVT);
2726 SDOperand Zero = DAG.getConstant(0, EVT);
2727 std::vector<SDOperand> MaskVec(NumElems, Zero);
2728 MaskVec[Idx] = DAG.getConstant(NumElems, EVT);
Chris Lattnerc24a1d32006-08-08 02:23:42 +00002729 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
2730 &MaskVec[0], MaskVec.size());
Evan Cheng14215c32006-04-21 23:03:30 +00002731 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
Evan Chenge8b51802006-04-21 01:05:10 +00002732}
2733
Evan Chengb0461082006-04-24 18:01:45 +00002734/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
2735///
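/// The vector is built as a v8i16: each pair of adjacent bytes is zero
/// extended, the odd byte is shifted left by 8 and OR'd with the even byte,
/// and the combined 16-bit value is inserted with PINSRW; the result is then
/// bitcast back to v16i8.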
2736static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros,
2737 unsigned NumNonZero, unsigned NumZero,
Evan Cheng11b0a5d2006-09-08 06:48:29 +00002738 SelectionDAG &DAG, TargetLowering &TLI) {
Evan Chengb0461082006-04-24 18:01:45 +00002739 if (NumNonZero > 8)
2740 return SDOperand();
2741
2742 SDOperand V(0, 0);
2743 bool First = true;
2744 for (unsigned i = 0; i < 16; ++i) {
2745 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
2746 if (ThisIsNonZero && First) {
2747 if (NumZero)
2748 V = getZeroVector(MVT::v8i16, DAG);
2749 else
2750 V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
2751 First = false;
2752 }
2753
2754 if ((i & 1) != 0) {
2755 SDOperand ThisElt(0, 0), LastElt(0, 0);
2756 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
2757 if (LastIsNonZero) {
2758 LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1));
2759 }
2760 if (ThisIsNonZero) {
2761 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i));
2762 ThisElt = DAG.getNode(ISD::SHL, MVT::i16,
2763 ThisElt, DAG.getConstant(8, MVT::i8));
2764 if (LastIsNonZero)
2765 ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt);
2766 } else
2767 ThisElt = LastElt;
2768
2769 if (ThisElt.Val)
2770 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt,
Evan Cheng11b0a5d2006-09-08 06:48:29 +00002771 DAG.getConstant(i/2, TLI.getPointerTy()));
Evan Chengb0461082006-04-24 18:01:45 +00002772 }
2773 }
2774
2775 return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V);
2776}
2777
2778/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
2779///
2780static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros,
2781 unsigned NumNonZero, unsigned NumZero,
Evan Cheng11b0a5d2006-09-08 06:48:29 +00002782 SelectionDAG &DAG, TargetLowering &TLI) {
Evan Chengb0461082006-04-24 18:01:45 +00002783 if (NumNonZero > 4)
2784 return SDOperand();
2785
2786 SDOperand V(0, 0);
2787 bool First = true;
2788 for (unsigned i = 0; i < 8; ++i) {
2789 bool isNonZero = (NonZeros & (1 << i)) != 0;
2790 if (isNonZero) {
2791 if (First) {
2792 if (NumZero)
2793 V = getZeroVector(MVT::v8i16, DAG);
2794 else
2795 V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
2796 First = false;
2797 }
2798 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i),
Evan Cheng11b0a5d2006-09-08 06:48:29 +00002799 DAG.getConstant(i, TLI.getPointerTy()));
Evan Chengb0461082006-04-24 18:01:45 +00002800 }
2801 }
2802
2803 return V;
2804}
2805
Evan Chenga9467aa2006-04-25 20:13:52 +00002806SDOperand
2807X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
2808 // All zero's are handled with pxor.
2809 if (ISD::isBuildVectorAllZeros(Op.Val))
2810 return Op;
2811
2812 // All one's are handled with pcmpeqd.
2813 if (ISD::isBuildVectorAllOnes(Op.Val))
2814 return Op;
2815
2816 MVT::ValueType VT = Op.getValueType();
2817 MVT::ValueType EVT = MVT::getVectorBaseType(VT);
2818 unsigned EVTBits = MVT::getSizeInBits(EVT);
2819
2820 unsigned NumElems = Op.getNumOperands();
2821 unsigned NumZero = 0;
2822 unsigned NumNonZero = 0;
2823 unsigned NonZeros = 0;
2824 std::set<SDOperand> Values;
2825 for (unsigned i = 0; i < NumElems; ++i) {
2826 SDOperand Elt = Op.getOperand(i);
2827 if (Elt.getOpcode() != ISD::UNDEF) {
2828 Values.insert(Elt);
2829 if (isZeroNode(Elt))
2830 NumZero++;
2831 else {
2832 NonZeros |= (1 << i);
2833 NumNonZero++;
2834 }
2835 }
2836 }
2837
2838 if (NumNonZero == 0)
2839 // Must be a mix of zero and undef. Return a zero vector.
2840 return getZeroVector(VT, DAG);
2841
2842 // Splat is obviously ok. Let legalizer expand it to a shuffle.
2843 if (Values.size() == 1)
2844 return SDOperand();
2845
2846 // Special case for single non-zero element.
2847 if (NumNonZero == 1) {
2848 unsigned Idx = CountTrailingZeros_32(NonZeros);
2849 SDOperand Item = Op.getOperand(Idx);
2850 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item);
2851 if (Idx == 0)
2852 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
2853 return getShuffleVectorZeroOrUndef(Item, VT, NumElems, Idx,
2854 NumZero > 0, DAG);
2855
2856 if (EVTBits == 32) {
2857 // Turn it into a shuffle of zero and zero-extended scalar to vector.
2858 Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0,
2859 DAG);
2860 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2861 MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
2862 std::vector<SDOperand> MaskVec;
2863 for (unsigned i = 0; i < NumElems; i++)
2864 MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT));
Chris Lattnerc24a1d32006-08-08 02:23:42 +00002865 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
2866 &MaskVec[0], MaskVec.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00002867 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item,
2868 DAG.getNode(ISD::UNDEF, VT), Mask);
2869 }
2870 }
2871
2872  // Let the legalizer expand 2-wide build_vectors.
2873 if (EVTBits == 64)
2874 return SDOperand();
2875
2876 // If element VT is < 32 bits, convert it to inserts into a zero vector.
2877 if (EVTBits == 8) {
Evan Cheng11b0a5d2006-09-08 06:48:29 +00002878 SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
2879 *this);
Evan Chenga9467aa2006-04-25 20:13:52 +00002880 if (V.Val) return V;
2881 }
2882
2883 if (EVTBits == 16) {
Evan Cheng11b0a5d2006-09-08 06:48:29 +00002884 SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
2885 *this);
Evan Chenga9467aa2006-04-25 20:13:52 +00002886 if (V.Val) return V;
2887 }
2888
2889 // If element VT is == 32 bits, turn it into a number of shuffles.
2890 std::vector<SDOperand> V(NumElems);
2891 if (NumElems == 4 && NumZero > 0) {
2892 for (unsigned i = 0; i < 4; ++i) {
2893 bool isZero = !(NonZeros & (1 << i));
2894 if (isZero)
2895 V[i] = getZeroVector(VT, DAG);
2896 else
2897 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
2898 }
2899
2900 for (unsigned i = 0; i < 2; ++i) {
2901 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
2902 default: break;
2903 case 0:
2904 V[i] = V[i*2]; // Must be a zero vector.
2905 break;
2906 case 1:
2907 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2],
2908 getMOVLMask(NumElems, DAG));
2909 break;
2910 case 2:
2911 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
2912 getMOVLMask(NumElems, DAG));
2913 break;
2914 case 3:
2915 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
2916 getUnpacklMask(NumElems, DAG));
2917 break;
2918 }
2919 }
2920
Evan Cheng9fee4422006-05-16 07:21:53 +00002921    // Take advantage of the fact that a GR32 to VR128 scalar_to_vector (i.e. movd)
Evan Chenga9467aa2006-04-25 20:13:52 +00002922 // clears the upper bits.
2923 // FIXME: we can do the same for v4f32 case when we know both parts of
2924 // the lower half come from scalar_to_vector (loadf32). We should do
2925 // that in post legalizer dag combiner with target specific hooks.
2926 if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0)
2927 return V[0];
2928 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2929 MVT::ValueType EVT = MVT::getVectorBaseType(MaskVT);
2930 std::vector<SDOperand> MaskVec;
2931 bool Reverse = (NonZeros & 0x3) == 2;
2932 for (unsigned i = 0; i < 2; ++i)
2933 if (Reverse)
2934 MaskVec.push_back(DAG.getConstant(1-i, EVT));
2935 else
2936 MaskVec.push_back(DAG.getConstant(i, EVT));
2937 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2;
2938 for (unsigned i = 0; i < 2; ++i)
2939 if (Reverse)
2940 MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT));
2941 else
2942 MaskVec.push_back(DAG.getConstant(i+NumElems, EVT));
Chris Lattnered728e82006-08-11 17:38:39 +00002943 SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
2944 &MaskVec[0], MaskVec.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00002945 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask);
2946 }
2947
2948 if (Values.size() > 2) {
2949 // Expand into a number of unpckl*.
2950 // e.g. for v4f32
2951 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
2952 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
2953 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
2954 SDOperand UnpckMask = getUnpacklMask(NumElems, DAG);
2955 for (unsigned i = 0; i < NumElems; ++i)
2956 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
2957 NumElems >>= 1;
2958 while (NumElems != 0) {
2959 for (unsigned i = 0; i < NumElems; ++i)
2960 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems],
2961 UnpckMask);
2962 NumElems >>= 1;
2963 }
2964 return V[0];
2965 }
2966
2967 return SDOperand();
2968}
2969
2970SDOperand
2971X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
2972 SDOperand V1 = Op.getOperand(0);
2973 SDOperand V2 = Op.getOperand(1);
2974 SDOperand PermMask = Op.getOperand(2);
2975 MVT::ValueType VT = Op.getValueType();
2976 unsigned NumElems = PermMask.getNumOperands();
2977 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
2978 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
2979
Evan Cheng89c5d042006-09-08 01:50:06 +00002980 if (isUndefShuffle(Op.Val))
2981 return DAG.getNode(ISD::UNDEF, VT);
2982
Evan Chenga9467aa2006-04-25 20:13:52 +00002983 if (isSplatMask(PermMask.Val)) {
2984 if (NumElems <= 4) return Op;
2985 // Promote it to a v4i32 splat.
2986 return PromoteSplat(Op, DAG);
2987 }
2988
2989 if (X86::isMOVLMask(PermMask.Val))
2990 return (V1IsUndef) ? V2 : Op;
2991
2992 if (X86::isMOVSHDUPMask(PermMask.Val) ||
2993 X86::isMOVSLDUPMask(PermMask.Val) ||
2994 X86::isMOVHLPSMask(PermMask.Val) ||
2995 X86::isMOVHPMask(PermMask.Val) ||
2996 X86::isMOVLPMask(PermMask.Val))
2997 return Op;
2998
2999 if (ShouldXformToMOVHLPS(PermMask.Val) ||
3000 ShouldXformToMOVLP(V1.Val, PermMask.Val))
3001 return CommuteVectorShuffle(Op, DAG);
3002
Evan Cheng89c5d042006-09-08 01:50:06 +00003003 bool V1IsSplat = isSplatVector(V1.Val);
3004 bool V2IsSplat = isSplatVector(V2.Val);
3005 if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
Evan Chenga9467aa2006-04-25 20:13:52 +00003006 Op = CommuteVectorShuffle(Op, DAG);
3007 V1 = Op.getOperand(0);
3008 V2 = Op.getOperand(1);
3009 PermMask = Op.getOperand(2);
Evan Cheng89c5d042006-09-08 01:50:06 +00003010 std::swap(V1IsSplat, V2IsSplat);
3011 std::swap(V1IsUndef, V2IsUndef);
Evan Chenga9467aa2006-04-25 20:13:52 +00003012 }
3013
Evan Cheng89c5d042006-09-08 01:50:06 +00003014 if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) {
Evan Chenga9467aa2006-04-25 20:13:52 +00003015 if (V2IsUndef) return V1;
3016 Op = CommuteVectorShuffle(Op, DAG);
3017 V1 = Op.getOperand(0);
3018 V2 = Op.getOperand(1);
3019 PermMask = Op.getOperand(2);
3020 if (V2IsSplat) {
3021 // V2 is a splat, so the mask may be malformed. That is, it may point
3022      // to any V2 element. The instruction selector won't like this. Get
3023 // a corrected mask and commute to form a proper MOVS{S|D}.
3024 SDOperand NewMask = getMOVLMask(NumElems, DAG);
3025 if (NewMask.Val != PermMask.Val)
3026 Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3027 }
3028 return Op;
3029 }
3030
3031 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
3032 X86::isUNPCKLMask(PermMask.Val) ||
3033 X86::isUNPCKHMask(PermMask.Val))
3034 return Op;
3035
3036 if (V2IsSplat) {
3037    // Normalize mask so all entries that point to V2 point to its first
3038    // element, then try to match unpck{h|l} again. If it matches, return a
3039 // new vector_shuffle with the corrected mask.
3040 SDOperand NewMask = NormalizeMask(PermMask, DAG);
3041 if (NewMask.Val != PermMask.Val) {
3042 if (X86::isUNPCKLMask(PermMask.Val, true)) {
3043 SDOperand NewMask = getUnpacklMask(NumElems, DAG);
3044 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3045 } else if (X86::isUNPCKHMask(PermMask.Val, true)) {
3046 SDOperand NewMask = getUnpackhMask(NumElems, DAG);
3047 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3048 }
3049 }
3050 }
3051
3052 // Normalize the node to match x86 shuffle ops if needed
3053 if (V2.getOpcode() != ISD::UNDEF)
3054 if (isCommutedSHUFP(PermMask.Val)) {
3055 Op = CommuteVectorShuffle(Op, DAG);
3056 V1 = Op.getOperand(0);
3057 V2 = Op.getOperand(1);
3058 PermMask = Op.getOperand(2);
3059 }
3060
3061 // If VT is integer, try PSHUF* first, then SHUFP*.
3062 if (MVT::isInteger(VT)) {
3063 if (X86::isPSHUFDMask(PermMask.Val) ||
3064 X86::isPSHUFHWMask(PermMask.Val) ||
3065 X86::isPSHUFLWMask(PermMask.Val)) {
3066 if (V2.getOpcode() != ISD::UNDEF)
3067 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
3068 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
3069 return Op;
3070 }
3071
3072 if (X86::isSHUFPMask(PermMask.Val))
3073 return Op;
3074
3075 // Handle v8i16 shuffle high / low shuffle node pair.
3076 if (VT == MVT::v8i16 && isPSHUFHW_PSHUFLWMask(PermMask.Val)) {
3077 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
3078 MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
3079 std::vector<SDOperand> MaskVec;
3080 for (unsigned i = 0; i != 4; ++i)
3081 MaskVec.push_back(PermMask.getOperand(i));
3082 for (unsigned i = 4; i != 8; ++i)
3083 MaskVec.push_back(DAG.getConstant(i, BaseVT));
Chris Lattnered728e82006-08-11 17:38:39 +00003084 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3085 &MaskVec[0], MaskVec.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00003086 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
3087 MaskVec.clear();
3088 for (unsigned i = 0; i != 4; ++i)
3089 MaskVec.push_back(DAG.getConstant(i, BaseVT));
3090 for (unsigned i = 4; i != 8; ++i)
3091 MaskVec.push_back(PermMask.getOperand(i));
Chris Lattnered728e82006-08-11 17:38:39 +00003092 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0],MaskVec.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00003093 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
3094 }
3095 } else {
3096 // Floating point cases in the other order.
3097 if (X86::isSHUFPMask(PermMask.Val))
3098 return Op;
3099 if (X86::isPSHUFDMask(PermMask.Val) ||
3100 X86::isPSHUFHWMask(PermMask.Val) ||
3101 X86::isPSHUFLWMask(PermMask.Val)) {
3102 if (V2.getOpcode() != ISD::UNDEF)
3103 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
3104 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
3105 return Op;
3106 }
3107 }
3108
3109 if (NumElems == 4) {
Evan Chenga9467aa2006-04-25 20:13:52 +00003110 MVT::ValueType MaskVT = PermMask.getValueType();
3111 MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
Evan Cheng3cd43622006-04-28 07:03:38 +00003112 std::vector<std::pair<int, int> > Locs;
3113    Locs.resize(NumElems);
3114 std::vector<SDOperand> Mask1(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
3115 std::vector<SDOperand> Mask2(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
3116 unsigned NumHi = 0;
3117 unsigned NumLo = 0;
3118    // If no more than two elements come from either vector, this can be
3119    // implemented with two shuffles. The first shuffle gathers the elements.
3120    // The second shuffle, which takes the first shuffle as both of its
3121    // vector operands, puts the elements into the right order.
3122 for (unsigned i = 0; i != NumElems; ++i) {
3123 SDOperand Elt = PermMask.getOperand(i);
3124 if (Elt.getOpcode() == ISD::UNDEF) {
3125 Locs[i] = std::make_pair(-1, -1);
3126 } else {
3127 unsigned Val = cast<ConstantSDNode>(Elt)->getValue();
3128 if (Val < NumElems) {
3129 Locs[i] = std::make_pair(0, NumLo);
3130 Mask1[NumLo] = Elt;
3131 NumLo++;
3132 } else {
3133 Locs[i] = std::make_pair(1, NumHi);
3134 if (2+NumHi < NumElems)
3135 Mask1[2+NumHi] = Elt;
3136 NumHi++;
3137 }
3138 }
3139 }
3140 if (NumLo <= 2 && NumHi <= 2) {
3141 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
Chris Lattnered728e82006-08-11 17:38:39 +00003142 DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3143 &Mask1[0], Mask1.size()));
Evan Cheng3cd43622006-04-28 07:03:38 +00003144 for (unsigned i = 0; i != NumElems; ++i) {
3145 if (Locs[i].first == -1)
3146 continue;
3147 else {
3148 unsigned Idx = (i < NumElems/2) ? 0 : NumElems;
3149 Idx += Locs[i].first * (NumElems/2) + Locs[i].second;
3150 Mask2[i] = DAG.getConstant(Idx, MaskEVT);
3151 }
3152 }
3153
3154 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1,
Chris Lattnered728e82006-08-11 17:38:39 +00003155 DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3156 &Mask2[0], Mask2.size()));
Evan Cheng3cd43622006-04-28 07:03:38 +00003157 }
3158
3159 // Break it into (shuffle shuffle_hi, shuffle_lo).
3160    Locs.assign(NumElems, std::make_pair(-1, -1));
Evan Chenga9467aa2006-04-25 20:13:52 +00003161 std::vector<SDOperand> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
3162 std::vector<SDOperand> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
3163 std::vector<SDOperand> *MaskPtr = &LoMask;
3164 unsigned MaskIdx = 0;
3165 unsigned LoIdx = 0;
3166 unsigned HiIdx = NumElems/2;
3167 for (unsigned i = 0; i != NumElems; ++i) {
3168 if (i == NumElems/2) {
3169 MaskPtr = &HiMask;
3170 MaskIdx = 1;
3171 LoIdx = 0;
3172 HiIdx = NumElems/2;
3173 }
3174 SDOperand Elt = PermMask.getOperand(i);
3175 if (Elt.getOpcode() == ISD::UNDEF) {
3176 Locs[i] = std::make_pair(-1, -1);
3177 } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) {
3178 Locs[i] = std::make_pair(MaskIdx, LoIdx);
3179 (*MaskPtr)[LoIdx] = Elt;
3180 LoIdx++;
3181 } else {
3182 Locs[i] = std::make_pair(MaskIdx, HiIdx);
3183 (*MaskPtr)[HiIdx] = Elt;
3184 HiIdx++;
3185 }
3186 }
3187
Chris Lattner3d826992006-05-16 06:45:34 +00003188 SDOperand LoShuffle =
3189 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
Chris Lattnered728e82006-08-11 17:38:39 +00003190 DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3191 &LoMask[0], LoMask.size()));
Chris Lattner3d826992006-05-16 06:45:34 +00003192 SDOperand HiShuffle =
3193 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
Chris Lattnered728e82006-08-11 17:38:39 +00003194 DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3195 &HiMask[0], HiMask.size()));
Evan Chenga9467aa2006-04-25 20:13:52 +00003196 std::vector<SDOperand> MaskOps;
3197 for (unsigned i = 0; i != NumElems; ++i) {
3198 if (Locs[i].first == -1) {
3199 MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
3200 } else {
3201 unsigned Idx = Locs[i].first * NumElems + Locs[i].second;
3202 MaskOps.push_back(DAG.getConstant(Idx, MaskEVT));
3203 }
3204 }
3205 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle,
Chris Lattnered728e82006-08-11 17:38:39 +00003206 DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3207 &MaskOps[0], MaskOps.size()));
Evan Chenga9467aa2006-04-25 20:13:52 +00003208 }
3209
3210 return SDOperand();
3211}
3212
3213SDOperand
3214X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
3215 if (!isa<ConstantSDNode>(Op.getOperand(1)))
3216 return SDOperand();
3217
3218 MVT::ValueType VT = Op.getValueType();
3219 // TODO: handle v16i8.
3220 if (MVT::getSizeInBits(VT) == 16) {
3221    // Transform it so it matches pextrw which produces a 32-bit result.
3222 MVT::ValueType EVT = (MVT::ValueType)(VT+1);
3223 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT,
3224 Op.getOperand(0), Op.getOperand(1));
3225 SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract,
3226 DAG.getValueType(VT));
3227 return DAG.getNode(ISD::TRUNCATE, VT, Assert);
3228 } else if (MVT::getSizeInBits(VT) == 32) {
3229 SDOperand Vec = Op.getOperand(0);
3230 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
3231 if (Idx == 0)
3232 return Op;
Evan Chenga9467aa2006-04-25 20:13:52 +00003233 // SHUFPS the element to the lowest double word, then movss.
3234 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
Evan Chenga9467aa2006-04-25 20:13:52 +00003235 std::vector<SDOperand> IdxVec;
3236 IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorBaseType(MaskVT)));
3237 IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
3238 IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
3239 IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
Chris Lattnered728e82006-08-11 17:38:39 +00003240 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3241 &IdxVec[0], IdxVec.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00003242 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
3243 Vec, Vec, Mask);
3244 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
Evan Chengde7156f2006-06-15 08:14:54 +00003245 DAG.getConstant(0, getPointerTy()));
Evan Chenga9467aa2006-04-25 20:13:52 +00003246 } else if (MVT::getSizeInBits(VT) == 64) {
3247 SDOperand Vec = Op.getOperand(0);
3248 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
3249 if (Idx == 0)
3250 return Op;
3251
3252 // UNPCKHPD the element to the lowest double word, then movsd.
3253 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
3254 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
3255 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
3256 std::vector<SDOperand> IdxVec;
3257 IdxVec.push_back(DAG.getConstant(1, MVT::getVectorBaseType(MaskVT)));
3258 IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
Chris Lattnered728e82006-08-11 17:38:39 +00003259 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3260 &IdxVec[0], IdxVec.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00003261 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
3262 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
3263 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
Evan Chengde7156f2006-06-15 08:14:54 +00003264 DAG.getConstant(0, getPointerTy()));
Evan Chenga9467aa2006-04-25 20:13:52 +00003265 }
3266
3267 return SDOperand();
3268}
3269
3270SDOperand
3271X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
Evan Cheng9fee4422006-05-16 07:21:53 +00003272  // Transform it so it matches pinsrw which expects a 16-bit value in a GR32
Evan Chenga9467aa2006-04-25 20:13:52 +00003273 // as its second argument.
3274 MVT::ValueType VT = Op.getValueType();
3275 MVT::ValueType BaseVT = MVT::getVectorBaseType(VT);
3276 SDOperand N0 = Op.getOperand(0);
3277 SDOperand N1 = Op.getOperand(1);
3278 SDOperand N2 = Op.getOperand(2);
3279 if (MVT::getSizeInBits(BaseVT) == 16) {
3280 if (N1.getValueType() != MVT::i32)
3281 N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
3282 if (N2.getValueType() != MVT::i32)
3283 N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(), MVT::i32);
3284 return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2);
3285 } else if (MVT::getSizeInBits(BaseVT) == 32) {
3286 unsigned Idx = cast<ConstantSDNode>(N2)->getValue();
3287 if (Idx == 0) {
3288 // Use a movss.
3289 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, N1);
3290 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
3291 MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
3292 std::vector<SDOperand> MaskVec;
3293 MaskVec.push_back(DAG.getConstant(4, BaseVT));
3294 for (unsigned i = 1; i <= 3; ++i)
3295 MaskVec.push_back(DAG.getConstant(i, BaseVT));
3296 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, N0, N1,
Chris Lattnered728e82006-08-11 17:38:39 +00003297 DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3298 &MaskVec[0], MaskVec.size()));
Evan Chenga9467aa2006-04-25 20:13:52 +00003299 } else {
3300 // Use two pinsrw instructions to insert a 32 bit value.
3301 Idx <<= 1;
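      // A 32-bit element spans two 16-bit lanes of the v8i16 view, so the
      // insert index is doubled and the value is written with two pinsrw's:
      // the low 16 bits at Idx and, after the SRL below, the high 16 bits at
      // Idx+1.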
3302 if (MVT::isFloatingPoint(N1.getValueType())) {
3303 if (N1.getOpcode() == ISD::LOAD) {
Evan Cheng9fee4422006-05-16 07:21:53 +00003304 // Just load directly from f32mem to GR32.
Evan Chenga9467aa2006-04-25 20:13:52 +00003305 N1 = DAG.getLoad(MVT::i32, N1.getOperand(0), N1.getOperand(1),
3306 N1.getOperand(2));
3307 } else {
3308 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v4f32, N1);
3309 N1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, N1);
3310 N1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, N1,
Evan Chengde7156f2006-06-15 08:14:54 +00003311 DAG.getConstant(0, getPointerTy()));
Evan Chenga9467aa2006-04-25 20:13:52 +00003312 }
3313 }
3314 N0 = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, N0);
3315 N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1,
Evan Chengde7156f2006-06-15 08:14:54 +00003316 DAG.getConstant(Idx, getPointerTy()));
Evan Chenga9467aa2006-04-25 20:13:52 +00003317 N1 = DAG.getNode(ISD::SRL, MVT::i32, N1, DAG.getConstant(16, MVT::i8));
3318 N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1,
Evan Chengde7156f2006-06-15 08:14:54 +00003319 DAG.getConstant(Idx+1, getPointerTy()));
Evan Chenga9467aa2006-04-25 20:13:52 +00003320 return DAG.getNode(ISD::BIT_CONVERT, VT, N0);
3321 }
3322 }
3323
3324 return SDOperand();
3325}
3326
3327SDOperand
3328X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
3329 SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
3330 return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt);
3331}
3332
3333// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
3334// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
3335// one of the above mentioned nodes. It has to be wrapped because otherwise
3336// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
3337// be used to form addressing modes. These wrapped nodes will be selected
3338// into MOV32ri.
3339SDOperand
3340X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
3341 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
3342 SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
Evan Cheng9a083a42006-09-12 21:04:05 +00003343 DAG.getTargetConstantPool(CP->getConstVal(),
3344 getPointerTy(),
3345 CP->getAlignment()));
Evan Chenga9467aa2006-04-25 20:13:52 +00003346 if (Subtarget->isTargetDarwin()) {
3347 // With PIC, the address is actually $g + Offset.
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003348 if (!Subtarget->is64Bit() &&
3349 getTargetMachine().getRelocationModel() == Reloc::PIC_)
Evan Chenga9467aa2006-04-25 20:13:52 +00003350 Result = DAG.getNode(ISD::ADD, getPointerTy(),
3351 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), Result);
3352 }
3353
3354 return Result;
3355}
3356
3357SDOperand
3358X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
3359 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3360 SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
Chris Lattner3d826992006-05-16 06:45:34 +00003361 DAG.getTargetGlobalAddress(GV,
3362 getPointerTy()));
Evan Chenga9467aa2006-04-25 20:13:52 +00003363 if (Subtarget->isTargetDarwin()) {
3364 // With PIC, the address is actually $g + Offset.
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003365 if (!Subtarget->is64Bit() &&
3366 getTargetMachine().getRelocationModel() == Reloc::PIC_)
Evan Chenga9467aa2006-04-25 20:13:52 +00003367 Result = DAG.getNode(ISD::ADD, getPointerTy(),
Chris Lattner3d826992006-05-16 06:45:34 +00003368 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
3369 Result);
Evan Chenga9467aa2006-04-25 20:13:52 +00003370
3371 // For Darwin, external and weak symbols are indirect, so we want to load
3372 // the value at address GV, not the value of GV itself. This means that
3373 // the GlobalAddress must be in the base or index register of the address,
3374 // not the GV offset field.
3375 if (getTargetMachine().getRelocationModel() != Reloc::Static &&
3376 DarwinGVRequiresExtraLoad(GV))
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003377 Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(),
Evan Chenga9467aa2006-04-25 20:13:52 +00003378 Result, DAG.getSrcValue(NULL));
3379 }
3380
3381 return Result;
3382}
3383
3384SDOperand
3385X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) {
3386 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
3387 SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
Chris Lattner3d826992006-05-16 06:45:34 +00003388 DAG.getTargetExternalSymbol(Sym,
3389 getPointerTy()));
Evan Chenga9467aa2006-04-25 20:13:52 +00003390 if (Subtarget->isTargetDarwin()) {
3391 // With PIC, the address is actually $g + Offset.
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003392 if (!Subtarget->is64Bit() &&
3393 getTargetMachine().getRelocationModel() == Reloc::PIC_)
Evan Chenga9467aa2006-04-25 20:13:52 +00003394 Result = DAG.getNode(ISD::ADD, getPointerTy(),
Chris Lattner3d826992006-05-16 06:45:34 +00003395 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
3396 Result);
Evan Chenga9467aa2006-04-25 20:13:52 +00003397 }
3398
3399 return Result;
3400}
3401
3402SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) {
Evan Cheng9c249c32006-01-09 18:33:28 +00003403 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
3404 "Not an i64 shift!");
3405 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
3406 SDOperand ShOpLo = Op.getOperand(0);
3407 SDOperand ShOpHi = Op.getOperand(1);
3408 SDOperand ShAmt = Op.getOperand(2);
Evan Cheng4259a0f2006-09-11 02:19:56 +00003409 SDOperand Tmp1 = isSRA ?
3410 DAG.getNode(ISD::SRA, MVT::i32, ShOpHi, DAG.getConstant(31, MVT::i8)) :
3411 DAG.getConstant(0, MVT::i32);
Evan Cheng9c249c32006-01-09 18:33:28 +00003412
3413 SDOperand Tmp2, Tmp3;
3414 if (Op.getOpcode() == ISD::SHL_PARTS) {
3415 Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt);
3416 Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt);
3417 } else {
3418 Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt);
Evan Cheng267ba592006-01-19 01:46:14 +00003419 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt);
Evan Cheng9c249c32006-01-09 18:33:28 +00003420 }
3421
Evan Cheng4259a0f2006-09-11 02:19:56 +00003422 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
3423 SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt,
3424 DAG.getConstant(32, MVT::i8));
3425 SDOperand COps[]={DAG.getEntryNode(), AndNode, DAG.getConstant(0, MVT::i8)};
3426 SDOperand InFlag = DAG.getNode(X86ISD::CMP, VTs, 2, COps, 3).getValue(1);
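  // Tmp2/Tmp3 are the correct halves for shift amounts of 0-31.  For amounts
  // of 32-63, x86 shifts use the amount modulo 32, so the correct result is
  // Tmp3 in one half and Tmp1 (zero, or the sign word for SRA) in the other;
  // the test of (ShAmt & 32) above lets the CMOVs below pick between the two
  // cases.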
Evan Cheng9c249c32006-01-09 18:33:28 +00003427
3428 SDOperand Hi, Lo;
Evan Cheng77fa9192006-01-09 20:49:21 +00003429 SDOperand CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
Evan Cheng9c249c32006-01-09 18:33:28 +00003430
Evan Cheng4259a0f2006-09-11 02:19:56 +00003431 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag);
3432 SmallVector<SDOperand, 4> Ops;
Evan Cheng9c249c32006-01-09 18:33:28 +00003433 if (Op.getOpcode() == ISD::SHL_PARTS) {
3434 Ops.push_back(Tmp2);
3435 Ops.push_back(Tmp3);
3436 Ops.push_back(CC);
3437 Ops.push_back(InFlag);
Evan Cheng4259a0f2006-09-11 02:19:56 +00003438 Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
Evan Cheng9c249c32006-01-09 18:33:28 +00003439 InFlag = Hi.getValue(1);
3440
3441 Ops.clear();
3442 Ops.push_back(Tmp3);
3443 Ops.push_back(Tmp1);
3444 Ops.push_back(CC);
3445 Ops.push_back(InFlag);
Evan Cheng4259a0f2006-09-11 02:19:56 +00003446 Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
Evan Cheng9c249c32006-01-09 18:33:28 +00003447 } else {
3448 Ops.push_back(Tmp2);
3449 Ops.push_back(Tmp3);
3450 Ops.push_back(CC);
Evan Cheng12181af2006-01-09 22:29:54 +00003451 Ops.push_back(InFlag);
Evan Cheng4259a0f2006-09-11 02:19:56 +00003452 Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
Evan Cheng9c249c32006-01-09 18:33:28 +00003453 InFlag = Lo.getValue(1);
3454
3455 Ops.clear();
3456 Ops.push_back(Tmp3);
3457 Ops.push_back(Tmp1);
3458 Ops.push_back(CC);
3459 Ops.push_back(InFlag);
Evan Cheng4259a0f2006-09-11 02:19:56 +00003460 Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
Evan Cheng9c249c32006-01-09 18:33:28 +00003461 }
3462
Evan Cheng4259a0f2006-09-11 02:19:56 +00003463 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32);
Evan Cheng9c249c32006-01-09 18:33:28 +00003464 Ops.clear();
3465 Ops.push_back(Lo);
3466 Ops.push_back(Hi);
Evan Cheng4259a0f2006-09-11 02:19:56 +00003467 return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00003468}
Evan Cheng6305e502006-01-12 22:54:21 +00003469
Evan Chenga9467aa2006-04-25 20:13:52 +00003470SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
3471 assert(Op.getOperand(0).getValueType() <= MVT::i64 &&
3472 Op.getOperand(0).getValueType() >= MVT::i16 &&
3473 "Unknown SINT_TO_FP to lower!");
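  // The conversion goes through memory: the integer is spilled to a stack
  // slot and loaded with x87 FILD; when scalar SSE is enabled, the x87
  // result is stored back to memory and reloaded into an SSE register below.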
3474
3475 SDOperand Result;
3476 MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
3477 unsigned Size = MVT::getSizeInBits(SrcVT)/8;
3478 MachineFunction &MF = DAG.getMachineFunction();
3479 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
3480 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
3481 SDOperand Chain = DAG.getNode(ISD::STORE, MVT::Other,
3482 DAG.getEntryNode(), Op.getOperand(0),
3483 StackSlot, DAG.getSrcValue(NULL));
3484
3485 // Build the FILD
3486 std::vector<MVT::ValueType> Tys;
3487 Tys.push_back(MVT::f64);
3488 Tys.push_back(MVT::Other);
3489 if (X86ScalarSSE) Tys.push_back(MVT::Flag);
3490 std::vector<SDOperand> Ops;
3491 Ops.push_back(Chain);
3492 Ops.push_back(StackSlot);
3493 Ops.push_back(DAG.getValueType(SrcVT));
3494 Result = DAG.getNode(X86ScalarSSE ? X86ISD::FILD_FLAG :X86ISD::FILD,
Chris Lattnerc24a1d32006-08-08 02:23:42 +00003495 Tys, &Ops[0], Ops.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00003496
3497 if (X86ScalarSSE) {
3498 Chain = Result.getValue(1);
3499 SDOperand InFlag = Result.getValue(2);
3500
3501 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
3502 // shouldn't be necessary except that RFP cannot be live across
3503 // multiple blocks. When stackifier is fixed, they can be uncoupled.
Chris Lattner76ac0682005-11-15 00:40:23 +00003504 MachineFunction &MF = DAG.getMachineFunction();
Evan Chenga9467aa2006-04-25 20:13:52 +00003505 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
Chris Lattner76ac0682005-11-15 00:40:23 +00003506 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
Evan Cheng6305e502006-01-12 22:54:21 +00003507 std::vector<MVT::ValueType> Tys;
Evan Cheng5b97fcf2006-01-30 08:02:57 +00003508 Tys.push_back(MVT::Other);
Chris Lattner76ac0682005-11-15 00:40:23 +00003509 std::vector<SDOperand> Ops;
Evan Cheng6305e502006-01-12 22:54:21 +00003510 Ops.push_back(Chain);
Evan Chenga9467aa2006-04-25 20:13:52 +00003511 Ops.push_back(Result);
Chris Lattner76ac0682005-11-15 00:40:23 +00003512 Ops.push_back(StackSlot);
Evan Chenga9467aa2006-04-25 20:13:52 +00003513 Ops.push_back(DAG.getValueType(Op.getValueType()));
3514 Ops.push_back(InFlag);
Chris Lattnerc24a1d32006-08-08 02:23:42 +00003515 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00003516 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot,
3517 DAG.getSrcValue(NULL));
Chris Lattner76ac0682005-11-15 00:40:23 +00003518 }
Chris Lattner76ac0682005-11-15 00:40:23 +00003519
Evan Chenga9467aa2006-04-25 20:13:52 +00003520 return Result;
3521}
3522
3523SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
3524 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
3525 "Unknown FP_TO_SINT to lower!");
3526 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
3527 // stack slot.
3528 MachineFunction &MF = DAG.getMachineFunction();
3529 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
3530 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
3531 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
3532
3533 unsigned Opc;
3534 switch (Op.getValueType()) {
Chris Lattner76ac0682005-11-15 00:40:23 +00003535 default: assert(0 && "Invalid FP_TO_SINT to lower!");
3536 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
3537 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
3538 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
Evan Chenga9467aa2006-04-25 20:13:52 +00003539 }
Chris Lattner76ac0682005-11-15 00:40:23 +00003540
Evan Chenga9467aa2006-04-25 20:13:52 +00003541 SDOperand Chain = DAG.getEntryNode();
3542 SDOperand Value = Op.getOperand(0);
3543 if (X86ScalarSSE) {
3544 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
3545 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value, StackSlot,
3546 DAG.getSrcValue(0));
3547 std::vector<MVT::ValueType> Tys;
3548 Tys.push_back(MVT::f64);
3549 Tys.push_back(MVT::Other);
Chris Lattner76ac0682005-11-15 00:40:23 +00003550 std::vector<SDOperand> Ops;
Evan Cheng5b97fcf2006-01-30 08:02:57 +00003551 Ops.push_back(Chain);
Chris Lattner76ac0682005-11-15 00:40:23 +00003552 Ops.push_back(StackSlot);
Evan Chenga9467aa2006-04-25 20:13:52 +00003553 Ops.push_back(DAG.getValueType(Op.getOperand(0).getValueType()));
Chris Lattnerc24a1d32006-08-08 02:23:42 +00003554 Value = DAG.getNode(X86ISD::FLD, Tys, &Ops[0], Ops.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00003555 Chain = Value.getValue(1);
3556 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
3557 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
3558 }
Chris Lattner76ac0682005-11-15 00:40:23 +00003559
Evan Chenga9467aa2006-04-25 20:13:52 +00003560 // Build the FP_TO_INT*_IN_MEM
3561 std::vector<SDOperand> Ops;
3562 Ops.push_back(Chain);
3563 Ops.push_back(Value);
3564 Ops.push_back(StackSlot);
Evan Cheng5c68bba2006-08-11 07:35:45 +00003565 SDOperand FIST = DAG.getNode(Opc, MVT::Other, &Ops[0], Ops.size());
Evan Cheng172fce72006-01-06 00:43:03 +00003566
Evan Chenga9467aa2006-04-25 20:13:52 +00003567 // Load the result.
3568 return DAG.getLoad(Op.getValueType(), FIST, StackSlot,
3569 DAG.getSrcValue(NULL));
3570}
3571
3572SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) {
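  // fabs is lowered as a bitwise AND with a constant-pool value whose low
  // element clears only the sign bit (~(1 << 63) for f64, ~(1 << 31) for
  // f32); the constant is loaded packed and combined with FAND.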
3573 MVT::ValueType VT = Op.getValueType();
3574 const Type *OpNTy = MVT::getTypeForValueType(VT);
3575 std::vector<Constant*> CV;
3576 if (VT == MVT::f64) {
3577 CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63))));
3578 CV.push_back(ConstantFP::get(OpNTy, 0.0));
3579 } else {
3580 CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31))));
3581 CV.push_back(ConstantFP::get(OpNTy, 0.0));
3582 CV.push_back(ConstantFP::get(OpNTy, 0.0));
3583 CV.push_back(ConstantFP::get(OpNTy, 0.0));
3584 }
3585 Constant *CS = ConstantStruct::get(CV);
3586 SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
Evan Chengbd1c5a82006-08-11 09:08:15 +00003587 std::vector<MVT::ValueType> Tys;
3588 Tys.push_back(VT);
3589 Tys.push_back(MVT::Other);
3590 SmallVector<SDOperand, 3> Ops;
3591 Ops.push_back(DAG.getEntryNode());
3592 Ops.push_back(CPIdx);
3593 Ops.push_back(DAG.getSrcValue(NULL));
3594 SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00003595 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask);
3596}
3597
3598SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) {
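  // fneg is lowered like fabs above, but XORs with a constant whose low
  // element has only the sign bit set, flipping the sign via FXOR.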
3599 MVT::ValueType VT = Op.getValueType();
3600 const Type *OpNTy = MVT::getTypeForValueType(VT);
3601 std::vector<Constant*> CV;
3602 if (VT == MVT::f64) {
3603 CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63)));
3604 CV.push_back(ConstantFP::get(OpNTy, 0.0));
3605 } else {
3606 CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(1U << 31)));
3607 CV.push_back(ConstantFP::get(OpNTy, 0.0));
3608 CV.push_back(ConstantFP::get(OpNTy, 0.0));
3609 CV.push_back(ConstantFP::get(OpNTy, 0.0));
3610 }
3611 Constant *CS = ConstantStruct::get(CV);
3612 SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
Evan Chengbd1c5a82006-08-11 09:08:15 +00003613 std::vector<MVT::ValueType> Tys;
3614 Tys.push_back(VT);
3615 Tys.push_back(MVT::Other);
3616 SmallVector<SDOperand, 3> Ops;
3617 Ops.push_back(DAG.getEntryNode());
3618 Ops.push_back(CPIdx);
3619 Ops.push_back(DAG.getSrcValue(NULL));
3620 SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00003621 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask);
3622}
3623
Evan Cheng4259a0f2006-09-11 02:19:56 +00003624SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG,
3625 SDOperand Chain) {
Evan Chenga9467aa2006-04-25 20:13:52 +00003626 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
3627 SDOperand Cond;
Evan Cheng4259a0f2006-09-11 02:19:56 +00003628 SDOperand Op0 = Op.getOperand(0);
3629 SDOperand Op1 = Op.getOperand(1);
Evan Chenga9467aa2006-04-25 20:13:52 +00003630 SDOperand CC = Op.getOperand(2);
3631 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
Evan Cheng4259a0f2006-09-11 02:19:56 +00003632 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
Evan Chenga9467aa2006-04-25 20:13:52 +00003633 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
Evan Chenga9467aa2006-04-25 20:13:52 +00003634 unsigned X86CC;
Evan Chenga9467aa2006-04-25 20:13:52 +00003635
Evan Cheng4259a0f2006-09-11 02:19:56 +00003636 VTs = DAG.getNodeValueTypes(MVT::i8, MVT::Flag);
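  // If translateX86CC can express this comparison with a single X86 condition
  // code, emit one CMP plus one SETCC.  The FP orderings it cannot express
  // (ordered-equal and unordered-not-equal) fall through to the two-flag
  // sequences below.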
Chris Lattner7a627672006-09-13 03:22:10 +00003637 if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC,
3638 Op0, Op1, DAG)) {
Evan Cheng4259a0f2006-09-11 02:19:56 +00003639 SDOperand Ops1[] = { Chain, Op0, Op1 };
3640 Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops1, 3).getValue(1);
3641 SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond };
3642 return DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2);
3643 }
3644
3645 assert(isFP && "Illegal integer SetCC!");
3646
3647 SDOperand COps[] = { Chain, Op0, Op1 };
3648 Cond = DAG.getNode(X86ISD::CMP, VTs, 2, COps, 3).getValue(1);
3649
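  // An unordered FP comparison sets ZF, PF and CF all to 1, so a bare sete or
  // setne is not sufficient: ordered-equal must also verify that PF is clear,
  // and unordered-not-equal must also accept the PF-set (unordered) case.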
3650 switch (SetCCOpcode) {
3651 default: assert(false && "Illegal floating point SetCC!");
3652 case ISD::SETOEQ: { // !PF & ZF
3653 SDOperand Ops1[] = { DAG.getConstant(X86ISD::COND_NP, MVT::i8), Cond };
3654 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops1, 2);
3655 SDOperand Ops2[] = { DAG.getConstant(X86ISD::COND_E, MVT::i8),
3656 Tmp1.getValue(1) };
3657 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2);
3658 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
3659 }
3660 case ISD::SETUNE: { // PF | !ZF
3661 SDOperand Ops1[] = { DAG.getConstant(X86ISD::COND_P, MVT::i8), Cond };
3662 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops1, 2);
3663 SDOperand Ops2[] = { DAG.getConstant(X86ISD::COND_NE, MVT::i8),
3664 Tmp1.getValue(1) };
3665 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2);
3666 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
3667 }
Evan Chengc1583db2005-12-21 20:21:51 +00003668 }
Evan Chenga9467aa2006-04-25 20:13:52 +00003669}
Evan Cheng45df7f82006-01-30 23:41:35 +00003670
Evan Chenga9467aa2006-04-25 20:13:52 +00003671SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) {
Evan Cheng4259a0f2006-09-11 02:19:56 +00003672 bool addTest = true;
3673 SDOperand Chain = DAG.getEntryNode();
3674 SDOperand Cond = Op.getOperand(0);
3675 SDOperand CC;
3676 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
Evan Cheng944d1e92006-01-26 02:13:10 +00003677
Evan Cheng4259a0f2006-09-11 02:19:56 +00003678 if (Cond.getOpcode() == ISD::SETCC)
3679 Cond = LowerSETCC(Cond, DAG, Chain);
3680
3681 if (Cond.getOpcode() == X86ISD::SETCC) {
3682 CC = Cond.getOperand(0);
3683
Evan Chenga9467aa2006-04-25 20:13:52 +00003684 // If the condition flag is set by an X86ISD::CMP, then make a copy of it
Evan Cheng4259a0f2006-09-11 02:19:56 +00003685 // (since the flag operand cannot be shared). Use it as the condition-setting
 3686 // operand in place of the X86ISD::SETCC.
 3687 // If the X86ISD::SETCC has more than one use, it might be better
Evan Chenga9467aa2006-04-25 20:13:52 +00003688 // to use a test instead of duplicating the X86ISD::CMP (for register
Evan Cheng4259a0f2006-09-11 02:19:56 +00003689 // pressure reasons).
3690 SDOperand Cmp = Cond.getOperand(1);
3691 unsigned Opc = Cmp.getOpcode();
3692 bool IllegalFPCMov = !X86ScalarSSE &&
3693 MVT::isFloatingPoint(Op.getValueType()) &&
3694 !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
3695 if ((Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) &&
3696 !IllegalFPCMov) {
3697 SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) };
3698 Cond = DAG.getNode(Opc, VTs, 2, Ops, 3);
3699 addTest = false;
3700 }
3701 }
Evan Cheng73a1ad92006-01-10 20:26:56 +00003702
Evan Chenga9467aa2006-04-25 20:13:52 +00003703 if (addTest) {
3704 CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
Evan Cheng4259a0f2006-09-11 02:19:56 +00003705 SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) };
3706 Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3);
Evan Cheng225a4d02005-12-17 01:21:05 +00003707 }
Evan Cheng45df7f82006-01-30 23:41:35 +00003708
Evan Cheng4259a0f2006-09-11 02:19:56 +00003709 VTs = DAG.getNodeValueTypes(Op.getValueType(), MVT::Flag);
3710 SmallVector<SDOperand, 4> Ops;
Evan Chenga9467aa2006-04-25 20:13:52 +00003711 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
 3712 // the condition is true.
3713 Ops.push_back(Op.getOperand(2));
3714 Ops.push_back(Op.getOperand(1));
3715 Ops.push_back(CC);
Evan Cheng4259a0f2006-09-11 02:19:56 +00003716 Ops.push_back(Cond.getValue(1));
3717 return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00003718}
Evan Cheng944d1e92006-01-26 02:13:10 +00003719
Evan Chenga9467aa2006-04-25 20:13:52 +00003720SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) {
Evan Cheng4259a0f2006-09-11 02:19:56 +00003721 bool addTest = true;
3722 SDOperand Chain = Op.getOperand(0);
Evan Chenga9467aa2006-04-25 20:13:52 +00003723 SDOperand Cond = Op.getOperand(1);
3724 SDOperand Dest = Op.getOperand(2);
3725 SDOperand CC;
Evan Cheng4259a0f2006-09-11 02:19:56 +00003726 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
3727
Evan Chenga9467aa2006-04-25 20:13:52 +00003728 if (Cond.getOpcode() == ISD::SETCC)
Evan Cheng4259a0f2006-09-11 02:19:56 +00003729 Cond = LowerSETCC(Cond, DAG, Chain);
Evan Chenga9467aa2006-04-25 20:13:52 +00003730
3731 if (Cond.getOpcode() == X86ISD::SETCC) {
Evan Cheng4259a0f2006-09-11 02:19:56 +00003732 CC = Cond.getOperand(0);
Evan Chenga9467aa2006-04-25 20:13:52 +00003733
Evan Cheng4259a0f2006-09-11 02:19:56 +00003734 // If the condition flag is set by an X86ISD::CMP, then make a copy of it
 3735 // (since the flag operand cannot be shared). Use it as the condition-setting
 3736 // operand in place of the X86ISD::SETCC.
 3737 // If the X86ISD::SETCC has more than one use, it might be better
 3738 // to use a test instead of duplicating the X86ISD::CMP (for register
 3739 // pressure reasons).
3740 SDOperand Cmp = Cond.getOperand(1);
3741 unsigned Opc = Cmp.getOpcode();
3742 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) {
3743 SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) };
3744 Cond = DAG.getNode(Opc, VTs, 2, Ops, 3);
3745 addTest = false;
3746 }
3747 }
Evan Chengfb22e862006-01-13 01:03:02 +00003748
Evan Chenga9467aa2006-04-25 20:13:52 +00003749 if (addTest) {
3750 CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
Evan Cheng4259a0f2006-09-11 02:19:56 +00003751 SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) };
3752 Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3);
Evan Cheng6fc31042005-12-19 23:12:38 +00003753 }
Evan Chenga9467aa2006-04-25 20:13:52 +00003754 return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
Evan Cheng4259a0f2006-09-11 02:19:56 +00003755 Cond, Op.getOperand(2), CC, Cond.getValue(1));
Evan Chenga9467aa2006-04-25 20:13:52 +00003756}
Evan Chengae986f12006-01-11 22:15:48 +00003757
Evan Chenga9467aa2006-04-25 20:13:52 +00003758SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
3759 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
3760 SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
3761 DAG.getTargetJumpTable(JT->getIndex(),
3762 getPointerTy()));
3763 if (Subtarget->isTargetDarwin()) {
3764 // With PIC, the address is actually $g + Offset.
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003765 if (!Subtarget->is64Bit() &&
3766 getTargetMachine().getRelocationModel() == Reloc::PIC_)
Evan Chenga9467aa2006-04-25 20:13:52 +00003767 Result = DAG.getNode(ISD::ADD, getPointerTy(),
Chris Lattner3d826992006-05-16 06:45:34 +00003768 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
3769 Result);
Evan Chengae986f12006-01-11 22:15:48 +00003770 }
Evan Cheng99470012006-02-25 09:55:19 +00003771
Evan Chenga9467aa2006-04-25 20:13:52 +00003772 return Result;
3773}
Evan Cheng5588de92006-02-18 00:15:05 +00003774
Evan Cheng2a330942006-05-25 00:59:30 +00003775SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
3776 unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue();
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003777 if (Subtarget->is64Bit())
3778 return LowerX86_64CCCCallTo(Op, DAG);
3779 else if (CallingConv == CallingConv::Fast && EnableFastCC)
Evan Cheng2a330942006-05-25 00:59:30 +00003780 return LowerFastCCCallTo(Op, DAG);
3781 else
3782 return LowerCCCCallTo(Op, DAG);
3783}
3784
Evan Chenga9467aa2006-04-25 20:13:52 +00003785SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
3786 SDOperand Copy;
Nate Begeman8c47c3a2006-01-27 21:09:22 +00003787
Evan Chenga9467aa2006-04-25 20:13:52 +00003788 switch(Op.getNumOperands()) {
Nate Begeman8c47c3a2006-01-27 21:09:22 +00003789 default:
3790 assert(0 && "Do not know how to return this many arguments!");
3791 abort();
Chris Lattnerc070c622006-04-17 20:32:50 +00003792 case 1: // ret void.
Nate Begeman8c47c3a2006-01-27 21:09:22 +00003793 return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Op.getOperand(0),
Evan Chenga9467aa2006-04-25 20:13:52 +00003794 DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));
Evan Chenga3add0f2006-05-26 23:10:12 +00003795 case 3: {
Nate Begeman8c47c3a2006-01-27 21:09:22 +00003796 MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
Chris Lattnerc070c622006-04-17 20:32:50 +00003797
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003798 if (MVT::isVector(ArgVT) ||
3799 (Subtarget->is64Bit() && MVT::isFloatingPoint(ArgVT))) {
Chris Lattnerc070c622006-04-17 20:32:50 +00003800 // Integer or FP vector result -> XMM0.
3801 if (DAG.getMachineFunction().liveout_empty())
3802 DAG.getMachineFunction().addLiveOut(X86::XMM0);
3803 Copy = DAG.getCopyToReg(Op.getOperand(0), X86::XMM0, Op.getOperand(1),
3804 SDOperand());
3805 } else if (MVT::isInteger(ArgVT)) {
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003806 // Integer result -> EAX / RAX.
3807 // The C calling convention guarantees the return value has been
3808 // promoted to at least MVT::i32. The X86-64 ABI doesn't require the
 3809 // value to be promoted to MVT::i64, so we don't have to extend it to
3810 // 64-bit. Return the value in EAX, but mark RAX as liveout.
3811 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
Chris Lattnerc070c622006-04-17 20:32:50 +00003812 if (DAG.getMachineFunction().liveout_empty())
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003813 DAG.getMachineFunction().addLiveOut(Reg);
Chris Lattnerc070c622006-04-17 20:32:50 +00003814
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003815 Reg = (ArgVT == MVT::i64) ? X86::RAX : X86::EAX;
3816 Copy = DAG.getCopyToReg(Op.getOperand(0), Reg, Op.getOperand(1),
Nate Begeman8c47c3a2006-01-27 21:09:22 +00003817 SDOperand());
Chris Lattnerc070c622006-04-17 20:32:50 +00003818 } else if (!X86ScalarSSE) {
3819 // FP return with fp-stack value.
3820 if (DAG.getMachineFunction().liveout_empty())
3821 DAG.getMachineFunction().addLiveOut(X86::ST0);
3822
Nate Begeman8c47c3a2006-01-27 21:09:22 +00003823 std::vector<MVT::ValueType> Tys;
3824 Tys.push_back(MVT::Other);
3825 Tys.push_back(MVT::Flag);
3826 std::vector<SDOperand> Ops;
3827 Ops.push_back(Op.getOperand(0));
3828 Ops.push_back(Op.getOperand(1));
Evan Cheng5c68bba2006-08-11 07:35:45 +00003829 Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, &Ops[0], Ops.size());
Nate Begeman8c47c3a2006-01-27 21:09:22 +00003830 } else {
Chris Lattnerc070c622006-04-17 20:32:50 +00003831 // FP return with ScalarSSE (return on fp-stack).
3832 if (DAG.getMachineFunction().liveout_empty())
3833 DAG.getMachineFunction().addLiveOut(X86::ST0);
3834
Evan Chenge1ce4d72006-02-01 00:20:21 +00003835 SDOperand MemLoc;
3836 SDOperand Chain = Op.getOperand(0);
Evan Cheng5659ca82006-01-31 23:19:54 +00003837 SDOperand Value = Op.getOperand(1);
3838
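      // If the value being returned is already the result of a load hanging
      // off our chain, reuse its memory location and FLD straight from there;
      // otherwise spill it to a fresh stack slot below.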
Evan Chenga24617f2006-02-01 01:19:32 +00003839 if (Value.getOpcode() == ISD::LOAD &&
3840 (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) {
Evan Cheng5659ca82006-01-31 23:19:54 +00003841 Chain = Value.getOperand(0);
3842 MemLoc = Value.getOperand(1);
3843 } else {
3844 // Spill the value to memory and reload it into top of stack.
3845 unsigned Size = MVT::getSizeInBits(ArgVT)/8;
3846 MachineFunction &MF = DAG.getMachineFunction();
3847 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
3848 MemLoc = DAG.getFrameIndex(SSFI, getPointerTy());
3849 Chain = DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0),
3850 Value, MemLoc, DAG.getSrcValue(0));
3851 }
Nate Begeman8c47c3a2006-01-27 21:09:22 +00003852 std::vector<MVT::ValueType> Tys;
3853 Tys.push_back(MVT::f64);
3854 Tys.push_back(MVT::Other);
3855 std::vector<SDOperand> Ops;
3856 Ops.push_back(Chain);
Evan Cheng5659ca82006-01-31 23:19:54 +00003857 Ops.push_back(MemLoc);
Nate Begeman8c47c3a2006-01-27 21:09:22 +00003858 Ops.push_back(DAG.getValueType(ArgVT));
Evan Cheng5c68bba2006-08-11 07:35:45 +00003859 Copy = DAG.getNode(X86ISD::FLD, Tys, &Ops[0], Ops.size());
Nate Begeman8c47c3a2006-01-27 21:09:22 +00003860 Tys.clear();
3861 Tys.push_back(MVT::Other);
3862 Tys.push_back(MVT::Flag);
3863 Ops.clear();
3864 Ops.push_back(Copy.getValue(1));
3865 Ops.push_back(Copy);
Evan Cheng5c68bba2006-08-11 07:35:45 +00003866 Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, &Ops[0], Ops.size());
Nate Begeman8c47c3a2006-01-27 21:09:22 +00003867 }
3868 break;
3869 }
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003870 case 5: {
3871 unsigned Reg1 = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
3872 unsigned Reg2 = Subtarget->is64Bit() ? X86::RDX : X86::EDX;
Chris Lattnerc070c622006-04-17 20:32:50 +00003873 if (DAG.getMachineFunction().liveout_empty()) {
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003874 DAG.getMachineFunction().addLiveOut(Reg1);
3875 DAG.getMachineFunction().addLiveOut(Reg2);
Chris Lattnerc070c622006-04-17 20:32:50 +00003876 }
3877
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003878 Copy = DAG.getCopyToReg(Op.getOperand(0), Reg2, Op.getOperand(3),
Nate Begeman8c47c3a2006-01-27 21:09:22 +00003879 SDOperand());
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003880 Copy = DAG.getCopyToReg(Copy, Reg1, Op.getOperand(1), Copy.getValue(1));
Nate Begeman8c47c3a2006-01-27 21:09:22 +00003881 break;
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003882 }
Nate Begeman8c47c3a2006-01-27 21:09:22 +00003883 }
Evan Chenga9467aa2006-04-25 20:13:52 +00003884 return DAG.getNode(X86ISD::RET_FLAG, MVT::Other,
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003885 Copy, DAG.getConstant(getBytesToPopOnReturn(), MVT::i16),
Evan Chenga9467aa2006-04-25 20:13:52 +00003886 Copy.getValue(1));
3887}
3888
Evan Chenge0bcfbe2006-04-26 01:20:17 +00003889SDOperand
3890X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
Evan Chengdc614c12006-06-06 23:30:24 +00003891 MachineFunction &MF = DAG.getMachineFunction();
3892 const Function* Fn = MF.getFunction();
3893 if (Fn->hasExternalLinkage() &&
Evan Cheng0e14a562006-06-09 06:24:42 +00003894 Subtarget->TargetType == X86Subtarget::isCygwin &&
3895 Fn->getName() == "main")
Evan Chengdc614c12006-06-06 23:30:24 +00003896 MF.getInfo<X86FunctionInfo>()->setForceFramePointer(true);
3897
Evan Cheng17e734f2006-05-23 21:06:34 +00003898 unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003899 if (Subtarget->is64Bit())
3900 return LowerX86_64CCCArguments(Op, DAG);
3901 else if (CC == CallingConv::Fast && EnableFastCC)
Evan Cheng17e734f2006-05-23 21:06:34 +00003902 return LowerFastCCArguments(Op, DAG);
3903 else
3904 return LowerCCCArguments(Op, DAG);
Evan Chenge0bcfbe2006-04-26 01:20:17 +00003905}
3906
Evan Chenga9467aa2006-04-25 20:13:52 +00003907SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) {
3908 SDOperand InFlag(0, 0);
3909 SDOperand Chain = Op.getOperand(0);
3910 unsigned Align =
3911 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
3912 if (Align == 0) Align = 1;
3913
3914 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
3915 // If not DWORD aligned, call memset if size is less than the threshold.
3916 // It knows how to align to the right boundary first.
3917 if ((Align & 3) != 0 ||
3918 (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
3919 MVT::ValueType IntPtr = getPointerTy();
Owen Anderson20a631f2006-05-03 01:29:57 +00003920 const Type *IntPtrTy = getTargetData()->getIntPtrType();
Evan Chenga9467aa2006-04-25 20:13:52 +00003921 std::vector<std::pair<SDOperand, const Type*> > Args;
3922 Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy));
3923 // Extend the ubyte argument to be an int value for the call.
3924 SDOperand Val = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2));
3925 Args.push_back(std::make_pair(Val, IntPtrTy));
3926 Args.push_back(std::make_pair(Op.getOperand(3), IntPtrTy));
3927 std::pair<SDOperand,SDOperand> CallResult =
3928 LowerCallTo(Chain, Type::VoidTy, false, CallingConv::C, false,
3929 DAG.getExternalSymbol("memset", IntPtr), Args, DAG);
3930 return CallResult.second;
Evan Chengd5e905d2006-03-21 23:01:21 +00003931 }
Evan Chengd097e672006-03-22 02:53:00 +00003932
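  // Otherwise lower to a rep;stos sequence.  Rough register setup (a sketch;
  // the corresponding 64-bit registers are used on x86-64):
  //   AL/AX/EAX = fill value, replicated to the store width
  //   ECX       = number of AVT-sized stores
  //   EDI       = destination pointer
  // Any tail bytes are finished off with a second rep;stosb or a few explicit
  // stores.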
Evan Chenga9467aa2006-04-25 20:13:52 +00003933 MVT::ValueType AVT;
3934 SDOperand Count;
3935 ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2));
3936 unsigned BytesLeft = 0;
3937 bool TwoRepStos = false;
3938 if (ValC) {
3939 unsigned ValReg;
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003940 uint64_t Val = ValC->getValue() & 255;
Evan Chengc995b452006-04-06 23:23:56 +00003941
Evan Chenga9467aa2006-04-25 20:13:52 +00003942 // If the value is a constant, then we can potentially use wider stores.
3943 switch (Align & 3) {
3944 case 2: // WORD aligned
3945 AVT = MVT::i16;
Evan Chenga9467aa2006-04-25 20:13:52 +00003946 ValReg = X86::AX;
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003947 Val = (Val << 8) | Val;
Evan Chenga9467aa2006-04-25 20:13:52 +00003948 break;
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003949 case 0: // DWORD aligned
Evan Chenga9467aa2006-04-25 20:13:52 +00003950 AVT = MVT::i32;
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003951 ValReg = X86::EAX;
Evan Chenga9467aa2006-04-25 20:13:52 +00003952 Val = (Val << 8) | Val;
3953 Val = (Val << 16) | Val;
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003954 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) { // QWORD aligned
3955 AVT = MVT::i64;
3956 ValReg = X86::RAX;
3957 Val = (Val << 32) | Val;
3958 }
Evan Chenga9467aa2006-04-25 20:13:52 +00003959 break;
3960 default: // Byte aligned
3961 AVT = MVT::i8;
Evan Chenga9467aa2006-04-25 20:13:52 +00003962 ValReg = X86::AL;
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003963 Count = Op.getOperand(3);
Evan Chenga9467aa2006-04-25 20:13:52 +00003964 break;
Evan Chenga3caaee2006-04-19 22:48:17 +00003965 }
3966
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003967 if (AVT > MVT::i8) {
3968 if (I) {
3969 unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
3970 Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy());
3971 BytesLeft = I->getValue() % UBytes;
3972 } else {
3973 assert(AVT >= MVT::i32 &&
3974 "Do not use rep;stos if not at least DWORD aligned");
3975 Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
3976 Op.getOperand(3), DAG.getConstant(2, MVT::i8));
3977 TwoRepStos = true;
3978 }
3979 }
3980
Evan Chenga9467aa2006-04-25 20:13:52 +00003981 Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
3982 InFlag);
3983 InFlag = Chain.getValue(1);
3984 } else {
3985 AVT = MVT::i8;
3986 Count = Op.getOperand(3);
3987 Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag);
3988 InFlag = Chain.getValue(1);
Evan Chengd097e672006-03-22 02:53:00 +00003989 }
Evan Chengb0461082006-04-24 18:01:45 +00003990
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003991 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
3992 Count, InFlag);
Evan Chenga9467aa2006-04-25 20:13:52 +00003993 InFlag = Chain.getValue(1);
Evan Cheng11b0a5d2006-09-08 06:48:29 +00003994 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
3995 Op.getOperand(1), InFlag);
Evan Chenga9467aa2006-04-25 20:13:52 +00003996 InFlag = Chain.getValue(1);
Evan Cheng9b9cc4f2006-03-27 07:00:16 +00003997
Evan Chenga9467aa2006-04-25 20:13:52 +00003998 std::vector<MVT::ValueType> Tys;
3999 Tys.push_back(MVT::Other);
4000 Tys.push_back(MVT::Flag);
4001 std::vector<SDOperand> Ops;
4002 Ops.push_back(Chain);
4003 Ops.push_back(DAG.getValueType(AVT));
4004 Ops.push_back(InFlag);
Evan Cheng5c68bba2006-08-11 07:35:45 +00004005 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());
Evan Chengb0461082006-04-24 18:01:45 +00004006
Evan Chenga9467aa2006-04-25 20:13:52 +00004007 if (TwoRepStos) {
4008 InFlag = Chain.getValue(1);
4009 Count = Op.getOperand(3);
4010 MVT::ValueType CVT = Count.getValueType();
4011 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
Evan Cheng11b0a5d2006-09-08 06:48:29 +00004012 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
4013 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX,
4014 Left, InFlag);
Evan Chenga9467aa2006-04-25 20:13:52 +00004015 InFlag = Chain.getValue(1);
4016 Tys.clear();
4017 Tys.push_back(MVT::Other);
4018 Tys.push_back(MVT::Flag);
4019 Ops.clear();
4020 Ops.push_back(Chain);
4021 Ops.push_back(DAG.getValueType(MVT::i8));
4022 Ops.push_back(InFlag);
Evan Cheng5c68bba2006-08-11 07:35:45 +00004023 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00004024 } else if (BytesLeft) {
Evan Cheng11b0a5d2006-09-08 06:48:29 +00004025 // Issue stores for the last 1 - 7 bytes.
Evan Chenga9467aa2006-04-25 20:13:52 +00004026 SDOperand Value;
4027 unsigned Val = ValC->getValue() & 255;
4028 unsigned Offset = I->getValue() - BytesLeft;
4029 SDOperand DstAddr = Op.getOperand(1);
4030 MVT::ValueType AddrVT = DstAddr.getValueType();
Evan Cheng11b0a5d2006-09-08 06:48:29 +00004031 if (BytesLeft >= 4) {
4032 Val = (Val << 8) | Val;
4033 Val = (Val << 16) | Val;
4034 Value = DAG.getConstant(Val, MVT::i32);
4035 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
4036 DAG.getNode(ISD::ADD, AddrVT, DstAddr,
4037 DAG.getConstant(Offset, AddrVT)),
4038 DAG.getSrcValue(NULL));
4039 BytesLeft -= 4;
4040 Offset += 4;
4041 }
Evan Chenga9467aa2006-04-25 20:13:52 +00004042 if (BytesLeft >= 2) {
4043 Value = DAG.getConstant((Val << 8) | Val, MVT::i16);
4044 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
4045 DAG.getNode(ISD::ADD, AddrVT, DstAddr,
4046 DAG.getConstant(Offset, AddrVT)),
4047 DAG.getSrcValue(NULL));
4048 BytesLeft -= 2;
4049 Offset += 2;
Evan Cheng082c8782006-03-24 07:29:27 +00004050 }
Evan Chenga9467aa2006-04-25 20:13:52 +00004051 if (BytesLeft == 1) {
4052 Value = DAG.getConstant(Val, MVT::i8);
4053 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
4054 DAG.getNode(ISD::ADD, AddrVT, DstAddr,
4055 DAG.getConstant(Offset, AddrVT)),
4056 DAG.getSrcValue(NULL));
Evan Cheng14215c32006-04-21 23:03:30 +00004057 }
Evan Cheng082c8782006-03-24 07:29:27 +00004058 }
Evan Chengebf10062006-04-03 20:53:28 +00004059
Evan Chenga9467aa2006-04-25 20:13:52 +00004060 return Chain;
4061}
Evan Chengebf10062006-04-03 20:53:28 +00004062
Evan Chenga9467aa2006-04-25 20:13:52 +00004063SDOperand X86TargetLowering::LowerMEMCPY(SDOperand Op, SelectionDAG &DAG) {
4064 SDOperand Chain = Op.getOperand(0);
4065 unsigned Align =
4066 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
4067 if (Align == 0) Align = 1;
Evan Chengebf10062006-04-03 20:53:28 +00004068
Evan Chenga9467aa2006-04-25 20:13:52 +00004069 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
4070 // If not DWORD aligned, call memcpy if size is less than the threshold.
4071 // It knows how to align to the right boundary first.
4072 if ((Align & 3) != 0 ||
4073 (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
4074 MVT::ValueType IntPtr = getPointerTy();
Owen Anderson20a631f2006-05-03 01:29:57 +00004075 const Type *IntPtrTy = getTargetData()->getIntPtrType();
Evan Chenga9467aa2006-04-25 20:13:52 +00004076 std::vector<std::pair<SDOperand, const Type*> > Args;
4077 Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy));
4078 Args.push_back(std::make_pair(Op.getOperand(2), IntPtrTy));
4079 Args.push_back(std::make_pair(Op.getOperand(3), IntPtrTy));
4080 std::pair<SDOperand,SDOperand> CallResult =
4081 LowerCallTo(Chain, Type::VoidTy, false, CallingConv::C, false,
4082 DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG);
4083 return CallResult.second;
Evan Chengcbffa462006-03-31 19:22:53 +00004084 }
Evan Chenga9467aa2006-04-25 20:13:52 +00004085
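  // Otherwise lower to a rep;movs sequence.  Rough register setup (a sketch;
  // the corresponding 64-bit registers are used on x86-64):
  //   ECX = number of AVT-sized copies
  //   ESI = source pointer
  //   EDI = destination pointer
  // Any tail bytes are finished off with a second rep;movsb or a few explicit
  // load/store pairs.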
4086 MVT::ValueType AVT;
4087 SDOperand Count;
4088 unsigned BytesLeft = 0;
4089 bool TwoRepMovs = false;
4090 switch (Align & 3) {
4091 case 2: // WORD aligned
4092 AVT = MVT::i16;
Evan Chenga9467aa2006-04-25 20:13:52 +00004093 break;
Evan Cheng11b0a5d2006-09-08 06:48:29 +00004094 case 0: // DWORD aligned
Evan Chenga9467aa2006-04-25 20:13:52 +00004095 AVT = MVT::i32;
Evan Cheng11b0a5d2006-09-08 06:48:29 +00004096 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) // QWORD aligned
4097 AVT = MVT::i64;
Evan Chenga9467aa2006-04-25 20:13:52 +00004098 break;
4099 default: // Byte aligned
4100 AVT = MVT::i8;
4101 Count = Op.getOperand(3);
4102 break;
4103 }
4104
Evan Cheng11b0a5d2006-09-08 06:48:29 +00004105 if (AVT > MVT::i8) {
4106 if (I) {
4107 unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
4108 Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy());
4109 BytesLeft = I->getValue() % UBytes;
4110 } else {
4111 assert(AVT >= MVT::i32 &&
4112 "Do not use rep;movs if not at least DWORD aligned");
4113 Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
4114 Op.getOperand(3), DAG.getConstant(2, MVT::i8));
4115 TwoRepMovs = true;
4116 }
4117 }
4118
Evan Chenga9467aa2006-04-25 20:13:52 +00004119 SDOperand InFlag(0, 0);
Evan Cheng11b0a5d2006-09-08 06:48:29 +00004120 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
4121 Count, InFlag);
Evan Chenga9467aa2006-04-25 20:13:52 +00004122 InFlag = Chain.getValue(1);
Evan Cheng11b0a5d2006-09-08 06:48:29 +00004123 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
4124 Op.getOperand(1), InFlag);
Evan Chenga9467aa2006-04-25 20:13:52 +00004125 InFlag = Chain.getValue(1);
Evan Cheng11b0a5d2006-09-08 06:48:29 +00004126 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI,
4127 Op.getOperand(2), InFlag);
Evan Chenga9467aa2006-04-25 20:13:52 +00004128 InFlag = Chain.getValue(1);
4129
4130 std::vector<MVT::ValueType> Tys;
4131 Tys.push_back(MVT::Other);
4132 Tys.push_back(MVT::Flag);
4133 std::vector<SDOperand> Ops;
4134 Ops.push_back(Chain);
4135 Ops.push_back(DAG.getValueType(AVT));
4136 Ops.push_back(InFlag);
Evan Cheng5c68bba2006-08-11 07:35:45 +00004137 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00004138
4139 if (TwoRepMovs) {
4140 InFlag = Chain.getValue(1);
4141 Count = Op.getOperand(3);
4142 MVT::ValueType CVT = Count.getValueType();
4143 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
Evan Cheng11b0a5d2006-09-08 06:48:29 +00004144 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
4145 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX,
4146 Left, InFlag);
Evan Chenga9467aa2006-04-25 20:13:52 +00004147 InFlag = Chain.getValue(1);
4148 Tys.clear();
4149 Tys.push_back(MVT::Other);
4150 Tys.push_back(MVT::Flag);
4151 Ops.clear();
4152 Ops.push_back(Chain);
4153 Ops.push_back(DAG.getValueType(MVT::i8));
4154 Ops.push_back(InFlag);
Evan Cheng5c68bba2006-08-11 07:35:45 +00004155 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00004156 } else if (BytesLeft) {
Evan Cheng11b0a5d2006-09-08 06:48:29 +00004157 // Issue loads and stores for the last 1 - 7 bytes.
Evan Chenga9467aa2006-04-25 20:13:52 +00004158 unsigned Offset = I->getValue() - BytesLeft;
4159 SDOperand DstAddr = Op.getOperand(1);
4160 MVT::ValueType DstVT = DstAddr.getValueType();
4161 SDOperand SrcAddr = Op.getOperand(2);
4162 MVT::ValueType SrcVT = SrcAddr.getValueType();
4163 SDOperand Value;
Evan Cheng11b0a5d2006-09-08 06:48:29 +00004164 if (BytesLeft >= 4) {
4165 Value = DAG.getLoad(MVT::i32, Chain,
4166 DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
4167 DAG.getConstant(Offset, SrcVT)),
4168 DAG.getSrcValue(NULL));
4169 Chain = Value.getValue(1);
4170 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
4171 DAG.getNode(ISD::ADD, DstVT, DstAddr,
4172 DAG.getConstant(Offset, DstVT)),
4173 DAG.getSrcValue(NULL));
4174 BytesLeft -= 4;
4175 Offset += 4;
4176 }
Evan Chenga9467aa2006-04-25 20:13:52 +00004177 if (BytesLeft >= 2) {
4178 Value = DAG.getLoad(MVT::i16, Chain,
4179 DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
4180 DAG.getConstant(Offset, SrcVT)),
4181 DAG.getSrcValue(NULL));
4182 Chain = Value.getValue(1);
4183 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
4184 DAG.getNode(ISD::ADD, DstVT, DstAddr,
4185 DAG.getConstant(Offset, DstVT)),
4186 DAG.getSrcValue(NULL));
4187 BytesLeft -= 2;
4188 Offset += 2;
Evan Chengcbffa462006-03-31 19:22:53 +00004189 }
4190
Evan Chenga9467aa2006-04-25 20:13:52 +00004191 if (BytesLeft == 1) {
4192 Value = DAG.getLoad(MVT::i8, Chain,
4193 DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
4194 DAG.getConstant(Offset, SrcVT)),
4195 DAG.getSrcValue(NULL));
4196 Chain = Value.getValue(1);
4197 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
4198 DAG.getNode(ISD::ADD, DstVT, DstAddr,
4199 DAG.getConstant(Offset, DstVT)),
4200 DAG.getSrcValue(NULL));
4201 }
Evan Chengcbffa462006-03-31 19:22:53 +00004202 }
Evan Chenga9467aa2006-04-25 20:13:52 +00004203
4204 return Chain;
4205}
4206
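// READCYCLECOUNTER is lowered to RDTSC, which leaves the 64-bit time-stamp
// counter in EDX:EAX; the two halves are copied out of those registers and
// merged below.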
4207SDOperand
4208X86TargetLowering::LowerREADCYCLCECOUNTER(SDOperand Op, SelectionDAG &DAG) {
4209 std::vector<MVT::ValueType> Tys;
4210 Tys.push_back(MVT::Other);
4211 Tys.push_back(MVT::Flag);
4212 std::vector<SDOperand> Ops;
4213 Ops.push_back(Op.getOperand(0));
Evan Cheng5c68bba2006-08-11 07:35:45 +00004214 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &Ops[0], Ops.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00004215 Ops.clear();
4216 Ops.push_back(DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)));
4217 Ops.push_back(DAG.getCopyFromReg(Ops[0].getValue(1), X86::EDX,
4218 MVT::i32, Ops[0].getValue(2)));
4219 Ops.push_back(Ops[1].getValue(1));
4220 Tys[0] = Tys[1] = MVT::i32;
4221 Tys.push_back(MVT::Other);
Evan Cheng5c68bba2006-08-11 07:35:45 +00004222 return DAG.getNode(ISD::MERGE_VALUES, Tys, &Ops[0], Ops.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00004223}
4224
4225SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) {
Evan Cheng11b0a5d2006-09-08 06:48:29 +00004226 if (!Subtarget->is64Bit()) {
4227 // vastart just stores the address of the VarArgsFrameIndex slot into the
4228 // memory location argument.
4229 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
4230 return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
4231 Op.getOperand(1), Op.getOperand(2));
4232 }
4233
4234 // __va_list_tag:
4235 // gp_offset (0 - 6 * 8)
4236 // fp_offset (48 - 48 + 8 * 16)
 4237 // overflow_arg_area (points to the parameters passed in memory).
4238 // reg_save_area
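  // This matches the x86-64 ABI va_list layout, roughly:
  //   struct __va_list_tag {
  //     unsigned gp_offset;        // byte offset 0
  //     unsigned fp_offset;        // byte offset 4
  //     void *overflow_arg_area;   // byte offset 8
  //     void *reg_save_area;       // byte offset 16
  //   };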
4239 std::vector<SDOperand> MemOps;
4240 SDOperand FIN = Op.getOperand(1);
4241 // Store gp_offset
4242 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0),
4243 DAG.getConstant(VarArgsGPOffset, MVT::i32),
4244 FIN, Op.getOperand(2));
4245 MemOps.push_back(Store);
4246
4247 // Store fp_offset
4248 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
4249 DAG.getConstant(4, getPointerTy()));
4250 Store = DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0),
4251 DAG.getConstant(VarArgsFPOffset, MVT::i32),
4252 FIN, Op.getOperand(2));
4253 MemOps.push_back(Store);
4254
4255 // Store ptr to overflow_arg_area
4256 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
4257 DAG.getConstant(4, getPointerTy()));
4258 SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
4259 Store = DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0),
4260 OVFIN, FIN, Op.getOperand(2));
4261 MemOps.push_back(Store);
4262
4263 // Store ptr to reg_save_area.
4264 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
4265 DAG.getConstant(8, getPointerTy()));
4266 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
4267 Store = DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0),
4268 RSFIN, FIN, Op.getOperand(2));
4269 MemOps.push_back(Store);
4270 return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size());
Evan Chenga9467aa2006-04-25 20:13:52 +00004271}
4272
4273SDOperand
4274X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
4275 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue();
4276 switch (IntNo) {
4277 default: return SDOperand(); // Don't custom lower most intrinsics.
Evan Cheng78038292006-04-05 23:38:46 +00004278 // Comparison intrinsics.
Evan Chenga9467aa2006-04-25 20:13:52 +00004279 case Intrinsic::x86_sse_comieq_ss:
4280 case Intrinsic::x86_sse_comilt_ss:
4281 case Intrinsic::x86_sse_comile_ss:
4282 case Intrinsic::x86_sse_comigt_ss:
4283 case Intrinsic::x86_sse_comige_ss:
4284 case Intrinsic::x86_sse_comineq_ss:
4285 case Intrinsic::x86_sse_ucomieq_ss:
4286 case Intrinsic::x86_sse_ucomilt_ss:
4287 case Intrinsic::x86_sse_ucomile_ss:
4288 case Intrinsic::x86_sse_ucomigt_ss:
4289 case Intrinsic::x86_sse_ucomige_ss:
4290 case Intrinsic::x86_sse_ucomineq_ss:
4291 case Intrinsic::x86_sse2_comieq_sd:
4292 case Intrinsic::x86_sse2_comilt_sd:
4293 case Intrinsic::x86_sse2_comile_sd:
4294 case Intrinsic::x86_sse2_comigt_sd:
4295 case Intrinsic::x86_sse2_comige_sd:
4296 case Intrinsic::x86_sse2_comineq_sd:
4297 case Intrinsic::x86_sse2_ucomieq_sd:
4298 case Intrinsic::x86_sse2_ucomilt_sd:
4299 case Intrinsic::x86_sse2_ucomile_sd:
4300 case Intrinsic::x86_sse2_ucomigt_sd:
4301 case Intrinsic::x86_sse2_ucomige_sd:
4302 case Intrinsic::x86_sse2_ucomineq_sd: {
4303 unsigned Opc = 0;
4304 ISD::CondCode CC = ISD::SETCC_INVALID;
4305 switch (IntNo) {
4306 default: break;
4307 case Intrinsic::x86_sse_comieq_ss:
4308 case Intrinsic::x86_sse2_comieq_sd:
4309 Opc = X86ISD::COMI;
4310 CC = ISD::SETEQ;
4311 break;
Evan Cheng78038292006-04-05 23:38:46 +00004312 case Intrinsic::x86_sse_comilt_ss:
Evan Cheng78038292006-04-05 23:38:46 +00004313 case Intrinsic::x86_sse2_comilt_sd:
Evan Chenga9467aa2006-04-25 20:13:52 +00004314 Opc = X86ISD::COMI;
4315 CC = ISD::SETLT;
4316 break;
4317 case Intrinsic::x86_sse_comile_ss:
Evan Cheng78038292006-04-05 23:38:46 +00004318 case Intrinsic::x86_sse2_comile_sd:
Evan Chenga9467aa2006-04-25 20:13:52 +00004319 Opc = X86ISD::COMI;
4320 CC = ISD::SETLE;
4321 break;
4322 case Intrinsic::x86_sse_comigt_ss:
Evan Cheng78038292006-04-05 23:38:46 +00004323 case Intrinsic::x86_sse2_comigt_sd:
Evan Chenga9467aa2006-04-25 20:13:52 +00004324 Opc = X86ISD::COMI;
4325 CC = ISD::SETGT;
4326 break;
4327 case Intrinsic::x86_sse_comige_ss:
Evan Cheng78038292006-04-05 23:38:46 +00004328 case Intrinsic::x86_sse2_comige_sd:
Evan Chenga9467aa2006-04-25 20:13:52 +00004329 Opc = X86ISD::COMI;
4330 CC = ISD::SETGE;
4331 break;
4332 case Intrinsic::x86_sse_comineq_ss:
Evan Cheng78038292006-04-05 23:38:46 +00004333 case Intrinsic::x86_sse2_comineq_sd:
Evan Chenga9467aa2006-04-25 20:13:52 +00004334 Opc = X86ISD::COMI;
4335 CC = ISD::SETNE;
4336 break;
4337 case Intrinsic::x86_sse_ucomieq_ss:
Evan Cheng78038292006-04-05 23:38:46 +00004338 case Intrinsic::x86_sse2_ucomieq_sd:
Evan Chenga9467aa2006-04-25 20:13:52 +00004339 Opc = X86ISD::UCOMI;
4340 CC = ISD::SETEQ;
4341 break;
4342 case Intrinsic::x86_sse_ucomilt_ss:
Evan Cheng78038292006-04-05 23:38:46 +00004343 case Intrinsic::x86_sse2_ucomilt_sd:
Evan Chenga9467aa2006-04-25 20:13:52 +00004344 Opc = X86ISD::UCOMI;
4345 CC = ISD::SETLT;
4346 break;
4347 case Intrinsic::x86_sse_ucomile_ss:
Evan Cheng78038292006-04-05 23:38:46 +00004348 case Intrinsic::x86_sse2_ucomile_sd:
Evan Chenga9467aa2006-04-25 20:13:52 +00004349 Opc = X86ISD::UCOMI;
4350 CC = ISD::SETLE;
4351 break;
4352 case Intrinsic::x86_sse_ucomigt_ss:
Evan Cheng78038292006-04-05 23:38:46 +00004353 case Intrinsic::x86_sse2_ucomigt_sd:
Evan Chenga9467aa2006-04-25 20:13:52 +00004354 Opc = X86ISD::UCOMI;
4355 CC = ISD::SETGT;
4356 break;
4357 case Intrinsic::x86_sse_ucomige_ss:
Evan Cheng78038292006-04-05 23:38:46 +00004358 case Intrinsic::x86_sse2_ucomige_sd:
Evan Chenga9467aa2006-04-25 20:13:52 +00004359 Opc = X86ISD::UCOMI;
4360 CC = ISD::SETGE;
4361 break;
4362 case Intrinsic::x86_sse_ucomineq_ss:
4363 case Intrinsic::x86_sse2_ucomineq_sd:
4364 Opc = X86ISD::UCOMI;
4365 CC = ISD::SETNE;
4366 break;
Evan Cheng78038292006-04-05 23:38:46 +00004367 }
Evan Cheng4259a0f2006-09-11 02:19:56 +00004368
Evan Chenga9467aa2006-04-25 20:13:52 +00004369 unsigned X86CC;
Chris Lattner7a627672006-09-13 03:22:10 +00004370 SDOperand LHS = Op.getOperand(1);
4371 SDOperand RHS = Op.getOperand(2);
4372 translateX86CC(CC, true, X86CC, LHS, RHS, DAG);
Evan Cheng4259a0f2006-09-11 02:19:56 +00004373
4374 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
Chris Lattner7a627672006-09-13 03:22:10 +00004375 SDOperand Ops1[] = { DAG.getEntryNode(), LHS, RHS };
Evan Cheng4259a0f2006-09-11 02:19:56 +00004376 SDOperand Cond = DAG.getNode(Opc, VTs, 2, Ops1, 3);
4377 VTs = DAG.getNodeValueTypes(MVT::i8, MVT::Flag);
4378 SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond };
4379 SDOperand SetCC = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2);
Evan Chenga9467aa2006-04-25 20:13:52 +00004380 return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC);
Evan Cheng78038292006-04-05 23:38:46 +00004381 }
Evan Cheng5c59d492005-12-23 07:31:11 +00004382 }
Chris Lattner76ac0682005-11-15 00:40:23 +00004383}
Evan Cheng6af02632005-12-20 06:22:03 +00004384
Evan Chenga9467aa2006-04-25 20:13:52 +00004385/// LowerOperation - Provide custom lowering hooks for some operations.
4386///
4387SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
4388 switch (Op.getOpcode()) {
4389 default: assert(0 && "Should not custom lower this!");
4390 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
4391 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
4392 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
4393 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
4394 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
4395 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
4396 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
4397 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
4398 case ISD::SHL_PARTS:
4399 case ISD::SRA_PARTS:
4400 case ISD::SRL_PARTS: return LowerShift(Op, DAG);
4401 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
4402 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
4403 case ISD::FABS: return LowerFABS(Op, DAG);
4404 case ISD::FNEG: return LowerFNEG(Op, DAG);
Evan Cheng4259a0f2006-09-11 02:19:56 +00004405 case ISD::SETCC: return LowerSETCC(Op, DAG, DAG.getEntryNode());
Evan Chenga9467aa2006-04-25 20:13:52 +00004406 case ISD::SELECT: return LowerSELECT(Op, DAG);
4407 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
4408 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
Evan Cheng2a330942006-05-25 00:59:30 +00004409 case ISD::CALL: return LowerCALL(Op, DAG);
Evan Chenga9467aa2006-04-25 20:13:52 +00004410 case ISD::RET: return LowerRET(Op, DAG);
Evan Chenge0bcfbe2006-04-26 01:20:17 +00004411 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
Evan Chenga9467aa2006-04-25 20:13:52 +00004412 case ISD::MEMSET: return LowerMEMSET(Op, DAG);
4413 case ISD::MEMCPY: return LowerMEMCPY(Op, DAG);
4414 case ISD::READCYCLECOUNTER: return LowerREADCYCLCECOUNTER(Op, DAG);
4415 case ISD::VASTART: return LowerVASTART(Op, DAG);
4416 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
4417 }
4418}
4419
Evan Cheng6af02632005-12-20 06:22:03 +00004420const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
4421 switch (Opcode) {
4422 default: return NULL;
Evan Cheng9c249c32006-01-09 18:33:28 +00004423 case X86ISD::SHLD: return "X86ISD::SHLD";
4424 case X86ISD::SHRD: return "X86ISD::SHRD";
Evan Cheng2dd217b2006-01-31 03:14:29 +00004425 case X86ISD::FAND: return "X86ISD::FAND";
Evan Cheng72d5c252006-01-31 22:28:30 +00004426 case X86ISD::FXOR: return "X86ISD::FXOR";
Evan Cheng6305e502006-01-12 22:54:21 +00004427 case X86ISD::FILD: return "X86ISD::FILD";
Evan Cheng11613a52006-02-04 02:20:30 +00004428 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
Evan Cheng6af02632005-12-20 06:22:03 +00004429 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
4430 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
4431 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
Evan Chenga74ce622005-12-21 02:39:21 +00004432 case X86ISD::FLD: return "X86ISD::FLD";
Evan Cheng45e190982006-01-05 00:27:02 +00004433 case X86ISD::FST: return "X86ISD::FST";
4434 case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT";
Evan Chenga74ce622005-12-21 02:39:21 +00004435 case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT";
Evan Cheng6af02632005-12-20 06:22:03 +00004436 case X86ISD::CALL: return "X86ISD::CALL";
4437 case X86ISD::TAILCALL: return "X86ISD::TAILCALL";
4438 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
4439 case X86ISD::CMP: return "X86ISD::CMP";
Evan Cheng78038292006-04-05 23:38:46 +00004440 case X86ISD::COMI: return "X86ISD::COMI";
4441 case X86ISD::UCOMI: return "X86ISD::UCOMI";
Evan Chengc1583db2005-12-21 20:21:51 +00004442 case X86ISD::SETCC: return "X86ISD::SETCC";
Evan Cheng6af02632005-12-20 06:22:03 +00004443 case X86ISD::CMOV: return "X86ISD::CMOV";
4444 case X86ISD::BRCOND: return "X86ISD::BRCOND";
Evan Chenga74ce622005-12-21 02:39:21 +00004445 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
Evan Cheng084a1022006-03-04 01:12:00 +00004446 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
4447 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
Evan Cheng72d5c252006-01-31 22:28:30 +00004448 case X86ISD::LOAD_PACK: return "X86ISD::LOAD_PACK";
Evan Cheng5987cfb2006-07-07 08:33:52 +00004449 case X86ISD::LOAD_UA: return "X86ISD::LOAD_UA";
Evan Cheng5588de92006-02-18 00:15:05 +00004450 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
Evan Chenge0ed6ec2006-02-23 20:41:18 +00004451 case X86ISD::Wrapper: return "X86ISD::Wrapper";
Evan Chenge7ee6a52006-03-24 23:15:12 +00004452 case X86ISD::S2VEC: return "X86ISD::S2VEC";
Evan Chengcbffa462006-03-31 19:22:53 +00004453 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
Evan Cheng5fd7c692006-03-31 21:55:24 +00004454 case X86ISD::PINSRW: return "X86ISD::PINSRW";
Evan Cheng6af02632005-12-20 06:22:03 +00004455 }
4456}
Evan Cheng9cdc16c2005-12-21 23:05:39 +00004457
Evan Cheng02612422006-07-05 22:17:51 +00004458/// isLegalAddressImmediate - Return true if the integer value or
4459/// GlobalValue can be used as the offset of the target addressing mode.
4460bool X86TargetLowering::isLegalAddressImmediate(int64_t V) const {
4461 // X86 allows a sign-extended 32-bit immediate field.
4462 return (V > -(1LL << 32) && V < (1LL << 32)-1);
4463}
4464
4465bool X86TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const {
 4466 // GV is 64-bit, but the displacement field is only 32-bit unless we are in
 4467 // the small code model. Mac OS X happens to support only the small PIC code model.
4468 // FIXME: better support for other OS's.
4469 if (Subtarget->is64Bit() && !Subtarget->isTargetDarwin())
4470 return false;
4471 if (Subtarget->isTargetDarwin()) {
4472 Reloc::Model RModel = getTargetMachine().getRelocationModel();
4473 if (RModel == Reloc::Static)
4474 return true;
4475 else if (RModel == Reloc::DynamicNoPIC)
4476 return !DarwinGVRequiresExtraLoad(GV);
4477 else
4478 return false;
4479 } else
4480 return true;
4481}
4482
4483/// isShuffleMaskLegal - Targets can use this to indicate that they only
4484/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
4485/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
4486/// are assumed to be legal.
4487bool
4488X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
4489 // Only do shuffles on 128-bit vector types for now.
4490 if (MVT::getSizeInBits(VT) == 64) return false;
4491 return (Mask.Val->getNumOperands() <= 4 ||
4492 isSplatMask(Mask.Val) ||
4493 isPSHUFHW_PSHUFLWMask(Mask.Val) ||
4494 X86::isUNPCKLMask(Mask.Val) ||
4495 X86::isUNPCKL_v_undef_Mask(Mask.Val) ||
4496 X86::isUNPCKHMask(Mask.Val));
4497}
4498
4499bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps,
4500 MVT::ValueType EVT,
4501 SelectionDAG &DAG) const {
4502 unsigned NumElts = BVOps.size();
4503 // Only do shuffles on 128-bit vector types for now.
4504 if (MVT::getSizeInBits(EVT) * NumElts == 64) return false;
4505 if (NumElts == 2) return true;
4506 if (NumElts == 4) {
4507 return (isMOVLMask(BVOps) || isCommutedMOVL(BVOps, true) ||
4508 isSHUFPMask(BVOps) || isCommutedSHUFP(BVOps));
4509 }
4510 return false;
4511}
4512
4513//===----------------------------------------------------------------------===//
4514// X86 Scheduler Hooks
4515//===----------------------------------------------------------------------===//
4516
4517MachineBasicBlock *
4518X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
4519 MachineBasicBlock *BB) {
4520 switch (MI->getOpcode()) {
4521 default: assert(false && "Unexpected instr type to insert");
4522 case X86::CMOV_FR32:
4523 case X86::CMOV_FR64:
4524 case X86::CMOV_V4F32:
4525 case X86::CMOV_V2F64:
4526 case X86::CMOV_V2I64: {
4527 // To "insert" a SELECT_CC instruction, we actually have to insert the
4528 // diamond control-flow pattern. The incoming instruction knows the
4529 // destination vreg to set, the condition code register to branch on, the
4530 // true/false values to select between, and a branch opcode to use.
4531 const BasicBlock *LLVM_BB = BB->getBasicBlock();
4532 ilist<MachineBasicBlock>::iterator It = BB;
4533 ++It;
4534
4535 // thisMBB:
4536 // ...
4537 // TrueVal = ...
4538 // cmpTY ccX, r1, r2
4539 // bCC copy1MBB
4540 // fallthrough --> copy0MBB
4541 MachineBasicBlock *thisMBB = BB;
4542 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
4543 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
4544 unsigned Opc = getCondBrOpcodeForX86CC(MI->getOperand(3).getImmedValue());
4545 BuildMI(BB, Opc, 1).addMBB(sinkMBB);
4546 MachineFunction *F = BB->getParent();
4547 F->getBasicBlockList().insert(It, copy0MBB);
4548 F->getBasicBlockList().insert(It, sinkMBB);
4549 // Update machine-CFG edges by first adding all successors of the current
4550 // block to the new block which will contain the Phi node for the select.
4551 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
4552 e = BB->succ_end(); i != e; ++i)
4553 sinkMBB->addSuccessor(*i);
4554 // Next, remove all successors of the current block, and add the true
4555 // and fallthrough blocks as its successors.
4556 while(!BB->succ_empty())
4557 BB->removeSuccessor(BB->succ_begin());
4558 BB->addSuccessor(copy0MBB);
4559 BB->addSuccessor(sinkMBB);
4560
4561 // copy0MBB:
4562 // %FalseValue = ...
4563 // # fallthrough to sinkMBB
4564 BB = copy0MBB;
4565
4566 // Update machine-CFG edges
4567 BB->addSuccessor(sinkMBB);
4568
4569 // sinkMBB:
4570 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
4571 // ...
4572 BB = sinkMBB;
4573 BuildMI(BB, X86::PHI, 4, MI->getOperand(0).getReg())
4574 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
4575 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
4576
4577 delete MI; // The pseudo instruction is gone now.
4578 return BB;
4579 }
4580
4581 case X86::FP_TO_INT16_IN_MEM:
4582 case X86::FP_TO_INT32_IN_MEM:
4583 case X86::FP_TO_INT64_IN_MEM: {
4584 // Change the floating point control register to use "round towards zero"
4585 // mode when truncating to an integer value.
4586 MachineFunction *F = BB->getParent();
4587 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
4588 addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
4589
 4590 // Load the old value of the control word...
4591 unsigned OldCW =
4592 F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass);
4593 addFrameReference(BuildMI(BB, X86::MOV16rm, 4, OldCW), CWFrameIdx);
4594
 4595 // Store a control word whose rounding-control bits select round-toward-zero...
4596 addFrameReference(BuildMI(BB, X86::MOV16mi, 5), CWFrameIdx).addImm(0xC7F);
4597
4598 // Reload the modified control word now...
4599 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
4600
 4601 // Restore the memory image of the control word to its original value
4602 addFrameReference(BuildMI(BB, X86::MOV16mr, 5), CWFrameIdx).addReg(OldCW);
4603
4604 // Get the X86 opcode to use.
4605 unsigned Opc;
4606 switch (MI->getOpcode()) {
4607 default: assert(0 && "illegal opcode!");
4608 case X86::FP_TO_INT16_IN_MEM: Opc = X86::FpIST16m; break;
4609 case X86::FP_TO_INT32_IN_MEM: Opc = X86::FpIST32m; break;
4610 case X86::FP_TO_INT64_IN_MEM: Opc = X86::FpIST64m; break;
4611 }
4612
4613 X86AddressMode AM;
4614 MachineOperand &Op = MI->getOperand(0);
4615 if (Op.isRegister()) {
4616 AM.BaseType = X86AddressMode::RegBase;
4617 AM.Base.Reg = Op.getReg();
4618 } else {
4619 AM.BaseType = X86AddressMode::FrameIndexBase;
4620 AM.Base.FrameIndex = Op.getFrameIndex();
4621 }
4622 Op = MI->getOperand(1);
4623 if (Op.isImmediate())
4624 AM.Scale = Op.getImmedValue();
4625 Op = MI->getOperand(2);
4626 if (Op.isImmediate())
4627 AM.IndexReg = Op.getImmedValue();
4628 Op = MI->getOperand(3);
4629 if (Op.isGlobalAddress()) {
4630 AM.GV = Op.getGlobal();
4631 } else {
4632 AM.Disp = Op.getImmedValue();
4633 }
4634 addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(MI->getOperand(4).getReg());
4635
4636 // Reload the original control word now.
4637 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
4638
4639 delete MI; // The pseudo instruction is gone now.
4640 return BB;
4641 }
4642 }
4643}
4644
4645//===----------------------------------------------------------------------===//
4646// X86 Optimization Hooks
4647//===----------------------------------------------------------------------===//
4648
Nate Begeman8a77efe2006-02-16 21:11:51 +00004649void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
4650 uint64_t Mask,
4651 uint64_t &KnownZero,
4652 uint64_t &KnownOne,
4653 unsigned Depth) const {
Evan Cheng9cdc16c2005-12-21 23:05:39 +00004654 unsigned Opc = Op.getOpcode();
Evan Cheng6d196db2006-04-05 06:11:20 +00004655 assert((Opc >= ISD::BUILTIN_OP_END ||
4656 Opc == ISD::INTRINSIC_WO_CHAIN ||
4657 Opc == ISD::INTRINSIC_W_CHAIN ||
4658 Opc == ISD::INTRINSIC_VOID) &&
4659 "Should use MaskedValueIsZero if you don't know whether Op"
4660 " is a target node!");
Evan Cheng9cdc16c2005-12-21 23:05:39 +00004661
Evan Cheng6d196db2006-04-05 06:11:20 +00004662 KnownZero = KnownOne = 0; // Don't know anything.
Evan Cheng9cdc16c2005-12-21 23:05:39 +00004663 switch (Opc) {
Evan Cheng6d196db2006-04-05 06:11:20 +00004664 default: break;
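  // X86ISD::SETCC produces 0 or 1 in its result type, so every bit above
  // bit 0 is known to be zero.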
Nate Begeman8a77efe2006-02-16 21:11:51 +00004665 case X86ISD::SETCC:
4666 KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
4667 break;
Evan Cheng9cdc16c2005-12-21 23:05:39 +00004668 }
Evan Cheng9cdc16c2005-12-21 23:05:39 +00004669}
Chris Lattnerc642aa52006-01-31 19:43:35 +00004670
Evan Cheng5987cfb2006-07-07 08:33:52 +00004671/// getShuffleScalarElt - Returns the scalar element that will make up the ith
4672/// element of the result of the vector shuffle.
4673static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) {
4674 MVT::ValueType VT = N->getValueType(0);
4675 SDOperand PermMask = N->getOperand(2);
4676 unsigned NumElems = PermMask.getNumOperands();
4677 SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1);
4678 i %= NumElems;
4679 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
4680 return (i == 0)
4681 ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
4682 } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
4683 SDOperand Idx = PermMask.getOperand(i);
4684 if (Idx.getOpcode() == ISD::UNDEF)
4685 return DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
4686 return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG);
4687 }
4688 return SDOperand();
4689}
4690
4691/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
4692/// node is a GlobalAddress + an offset.
4693static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) {
4694 if (N->getOpcode() == X86ISD::Wrapper) {
4695 if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) {
4696 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
4697 return true;
4698 }
4699 } else if (N->getOpcode() == ISD::ADD) {
4700 SDOperand N1 = N->getOperand(0);
4701 SDOperand N2 = N->getOperand(1);
4702 if (isGAPlusOffset(N1.Val, GA, Offset)) {
4703 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
4704 if (V) {
4705 Offset += V->getSignExtended();
4706 return true;
4707 }
4708 } else if (isGAPlusOffset(N2.Val, GA, Offset)) {
4709 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
4710 if (V) {
4711 Offset += V->getSignExtended();
4712 return true;
4713 }
4714 }
4715 }
4716 return false;
4717}
4718
4719/// isConsecutiveLoad - Returns true if N is loading from an address of Base
4720/// + Dist * Size.
4721static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
4722 MachineFrameInfo *MFI) {
4723 if (N->getOperand(0).Val != Base->getOperand(0).Val)
4724 return false;
4725
4726 SDOperand Loc = N->getOperand(1);
4727 SDOperand BaseLoc = Base->getOperand(1);
4728 if (Loc.getOpcode() == ISD::FrameIndex) {
4729 if (BaseLoc.getOpcode() != ISD::FrameIndex)
4730 return false;
4731 int FI = dyn_cast<FrameIndexSDNode>(Loc)->getIndex();
4732 int BFI = dyn_cast<FrameIndexSDNode>(BaseLoc)->getIndex();
4733 int FS = MFI->getObjectSize(FI);
4734 int BFS = MFI->getObjectSize(BFI);
4735 if (FS != BFS || FS != Size) return false;
4736 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
4737 } else {
4738 GlobalValue *GV1 = NULL;
4739 GlobalValue *GV2 = NULL;
4740 int64_t Offset1 = 0;
4741 int64_t Offset2 = 0;
4742 bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
4743 bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
4744 if (isGA1 && isGA2 && GV1 == GV2)
4745 return Offset1 == (Offset2 + Dist*Size);
4746 }
4747
4748 return false;
4749}
4750
Evan Cheng79cf9a52006-07-10 21:37:44 +00004751static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI,
4752 const X86Subtarget *Subtarget) {
Evan Cheng5987cfb2006-07-07 08:33:52 +00004753 GlobalValue *GV;
4754 int64_t Offset;
4755 if (isGAPlusOffset(Base, GV, Offset))
4756 return (GV->getAlignment() >= 16 && (Offset % 16) == 0);
4757 else {
4758 assert(Base->getOpcode() == ISD::FrameIndex && "Unexpected base node!");
4759 int BFI = dyn_cast<FrameIndexSDNode>(Base)->getIndex();
Evan Cheng79cf9a52006-07-10 21:37:44 +00004760 if (BFI < 0)
 4761 // Fixed objects do not specify alignment; however, their offsets are known.
4762 return ((Subtarget->getStackAlignment() % 16) == 0 &&
4763 (MFI->getObjectOffset(BFI) % 16) == 0);
4764 else
4765 return MFI->getObjectAlignment(BFI) >= 16;
Evan Cheng5987cfb2006-07-07 08:33:52 +00004766 }
4767 return false;
4768}
4769
4770
4771/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
4772/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
4773/// if the load addresses are consecutive, non-overlapping, and in the right
4774/// order.
Evan Cheng79cf9a52006-07-10 21:37:44 +00004775static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
4776 const X86Subtarget *Subtarget) {
Evan Cheng5987cfb2006-07-07 08:33:52 +00004777 MachineFunction &MF = DAG.getMachineFunction();
4778 MachineFrameInfo *MFI = MF.getFrameInfo();
4779 MVT::ValueType VT = N->getValueType(0);
4780 MVT::ValueType EVT = MVT::getVectorBaseType(VT);
4781 SDOperand PermMask = N->getOperand(2);
4782 int NumElems = (int)PermMask.getNumOperands();
4783 SDNode *Base = NULL;
4784 for (int i = 0; i < NumElems; ++i) {
4785 SDOperand Idx = PermMask.getOperand(i);
4786 if (Idx.getOpcode() == ISD::UNDEF) {
4787 if (!Base) return SDOperand();
4788 } else {
4789 SDOperand Arg =
4790 getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG);
4791 if (!Arg.Val || Arg.getOpcode() != ISD::LOAD)
4792 return SDOperand();
4793 if (!Base)
4794 Base = Arg.Val;
4795 else if (!isConsecutiveLoad(Arg.Val, Base,
4796 i, MVT::getSizeInBits(EVT)/8,MFI))
4797 return SDOperand();
4798 }
4799 }
4800
Evan Cheng79cf9a52006-07-10 21:37:44 +00004801 bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget);
Evan Cheng5987cfb2006-07-07 08:33:52 +00004802 if (isAlign16)
4803 return DAG.getLoad(VT, Base->getOperand(0), Base->getOperand(1),
4804 Base->getOperand(2));
Evan Cheng5c68bba2006-08-11 07:35:45 +00004805 else {
Evan Cheng5987cfb2006-07-07 08:33:52 +00004806    // Just use movups; it's shorter.
Evan Chengbd1c5a82006-08-11 09:08:15 +00004807 std::vector<MVT::ValueType> Tys;
4808 Tys.push_back(MVT::v4f32);
4809 Tys.push_back(MVT::Other);
4810 SmallVector<SDOperand, 3> Ops;
4811 Ops.push_back(Base->getOperand(0));
4812 Ops.push_back(Base->getOperand(1));
4813 Ops.push_back(Base->getOperand(2));
Evan Cheng5987cfb2006-07-07 08:33:52 +00004814 return DAG.getNode(ISD::BIT_CONVERT, VT,
Evan Chengbd1c5a82006-08-11 09:08:15 +00004815 DAG.getNode(X86ISD::LOAD_UA, Tys, &Ops[0], Ops.size()));
Evan Cheng5c68bba2006-08-11 07:35:45 +00004816 }
Evan Cheng5987cfb2006-07-07 08:33:52 +00004817}
4818
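// Minimal sketch (illustrative; not called anywhere) of the node built for
// the aligned case above: the combined load reuses the first element load's
// chain, base address, and source-value operands, now at the vector type.
static SDOperand exampleMakeAlignedVectorLoad(SelectionDAG &DAG,
                                              MVT::ValueType VT,
                                              SDNode *FirstLoad) {
  return DAG.getLoad(VT, FirstLoad->getOperand(0),   // chain
                     FirstLoad->getOperand(1),       // base address
                     FirstLoad->getOperand(2));      // source value
}
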
4819SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
4820 DAGCombinerInfo &DCI) const {
4821 TargetMachine &TM = getTargetMachine();
4822 SelectionDAG &DAG = DCI.DAG;
4823 switch (N->getOpcode()) {
4824 default: break;
4825 case ISD::VECTOR_SHUFFLE:
Evan Cheng79cf9a52006-07-10 21:37:44 +00004826 return PerformShuffleCombine(N, DAG, Subtarget);
Evan Cheng5987cfb2006-07-07 08:33:52 +00004827 }
4828
4829 return SDOperand();
4830}
4831
Evan Cheng02612422006-07-05 22:17:51 +00004832//===----------------------------------------------------------------------===//
4833// X86 Inline Assembly Support
4834//===----------------------------------------------------------------------===//
4835
Chris Lattner298ef372006-07-11 02:54:03 +00004836/// getConstraintType - Given a constraint letter, return the type of
4837/// constraint it is for this target.
4838X86TargetLowering::ConstraintType
4839X86TargetLowering::getConstraintType(char ConstraintLetter) const {
4840 switch (ConstraintLetter) {
Chris Lattnerc8db1072006-07-12 16:59:49 +00004841 case 'A':
4842 case 'r':
4843 case 'R':
4844 case 'l':
4845 case 'q':
4846 case 'Q':
4847 case 'x':
4848 case 'Y':
4849 return C_RegisterClass;
Chris Lattner298ef372006-07-11 02:54:03 +00004850 default: return TargetLowering::getConstraintType(ConstraintLetter);
4851 }
4852}
4853
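// Source-level illustration (an assumption, not code from this file): GCC
// inline asm using the 'q' constraint handled above.  'q' restricts the
// operand to EAX/EBX/ECX/EDX so its low byte can be named with the %b
// modifier.
static unsigned char exampleLowByte(unsigned Val) {
  unsigned char Result;
  __asm__("movb %b1, %0" : "=q"(Result) : "q"(Val));
  return Result;
}
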
Chris Lattnerc642aa52006-01-31 19:43:35 +00004854std::vector<unsigned> X86TargetLowering::
Chris Lattner7ad77df2006-02-22 00:56:39 +00004855getRegClassForInlineAsmConstraint(const std::string &Constraint,
4856 MVT::ValueType VT) const {
Chris Lattnerc642aa52006-01-31 19:43:35 +00004857 if (Constraint.size() == 1) {
4858 // FIXME: not handling fp-stack yet!
4859 // FIXME: not handling MMX registers yet ('y' constraint).
4860 switch (Constraint[0]) { // GCC X86 Constraint Letters
Chris Lattner298ef372006-07-11 02:54:03 +00004861 default: break; // Unknown constraint letter
4862 case 'A': // EAX/EDX
4863 if (VT == MVT::i32 || VT == MVT::i64)
4864 return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
4865 break;
Chris Lattnerc642aa52006-01-31 19:43:35 +00004866 case 'r': // GENERAL_REGS
4867 case 'R': // LEGACY_REGS
Chris Lattner6d4a2dc2006-05-06 00:29:37 +00004868 if (VT == MVT::i32)
4869 return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
4870 X86::ESI, X86::EDI, X86::EBP, X86::ESP, 0);
4871 else if (VT == MVT::i16)
4872 return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
4873 X86::SI, X86::DI, X86::BP, X86::SP, 0);
4874 else if (VT == MVT::i8)
4875        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
4876 break;
Chris Lattnerc642aa52006-01-31 19:43:35 +00004877 case 'l': // INDEX_REGS
Chris Lattner6d4a2dc2006-05-06 00:29:37 +00004878 if (VT == MVT::i32)
4879 return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
4880 X86::ESI, X86::EDI, X86::EBP, 0);
4881 else if (VT == MVT::i16)
4882 return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
4883 X86::SI, X86::DI, X86::BP, 0);
4884 else if (VT == MVT::i8)
4885        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
4886 break;
Chris Lattnerc642aa52006-01-31 19:43:35 +00004887 case 'q': // Q_REGS (GENERAL_REGS in 64-bit mode)
4888 case 'Q': // Q_REGS
Chris Lattner6d4a2dc2006-05-06 00:29:37 +00004889 if (VT == MVT::i32)
4890 return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
4891 else if (VT == MVT::i16)
4892 return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
4893 else if (VT == MVT::i8)
4894        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
4895 break;
Chris Lattnerc642aa52006-01-31 19:43:35 +00004896 case 'x': // SSE_REGS if SSE1 allowed
4897 if (Subtarget->hasSSE1())
4898 return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
4899 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
4900 0);
4901 return std::vector<unsigned>();
4902 case 'Y': // SSE_REGS if SSE2 allowed
4903 if (Subtarget->hasSSE2())
4904 return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
4905 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
4906 0);
4907 return std::vector<unsigned>();
4908 }
4909 }
4910
Chris Lattner7ad77df2006-02-22 00:56:39 +00004911 return std::vector<unsigned>();
Chris Lattnerc642aa52006-01-31 19:43:35 +00004912}
Chris Lattner524129d2006-07-31 23:26:50 +00004913
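// Usage sketch (illustrative; 'TLI' is assumed to be an already-constructed
// X86TargetLowering, e.g. the one owned by the target machine): querying the
// candidate registers for the "q" constraint with a 16-bit operand should
// include BX.
static bool exampleConstraintAllowsBX(const X86TargetLowering &TLI) {
  std::vector<unsigned> Regs =
    TLI.getRegClassForInlineAsmConstraint("q", MVT::i16);
  for (unsigned i = 0, e = Regs.size(); i != e; ++i)
    if (Regs[i] == X86::BX)
      return true;
  return false;
}
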
4914std::pair<unsigned, const TargetRegisterClass*>
4915X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
4916 MVT::ValueType VT) const {
4917 // Use the default implementation in TargetLowering to convert the register
4918 // constraint into a member of a register class.
4919 std::pair<unsigned, const TargetRegisterClass*> Res;
4920 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
4921
4922 // Not found? Bail out.
4923 if (Res.second == 0) return Res;
4924
4925 // Otherwise, check to see if this is a register class of the wrong value
4926  // type.  For example, we want to map "{ax},i32" -> {eax}; we don't want
4927  // it to turn into {ax},{dx}.
4928 if (Res.second->hasType(VT))
4929 return Res; // Correct type already, nothing to do.
4930
4931 // All of the single-register GCC register classes map their values onto
4932 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
4933  // really want an 8-bit, 32-bit, or 64-bit register, map to the appropriate
4934  // register class and return the appropriate register.
4935 if (Res.second != X86::GR16RegisterClass)
4936 return Res;
4937
4938 if (VT == MVT::i8) {
4939 unsigned DestReg = 0;
4940 switch (Res.first) {
4941 default: break;
4942 case X86::AX: DestReg = X86::AL; break;
4943 case X86::DX: DestReg = X86::DL; break;
4944 case X86::CX: DestReg = X86::CL; break;
4945 case X86::BX: DestReg = X86::BL; break;
4946 }
4947 if (DestReg) {
4948 Res.first = DestReg;
4949      Res.second = X86::GR8RegisterClass;
4950 }
4951 } else if (VT == MVT::i32) {
4952 unsigned DestReg = 0;
4953 switch (Res.first) {
4954 default: break;
4955 case X86::AX: DestReg = X86::EAX; break;
4956 case X86::DX: DestReg = X86::EDX; break;
4957 case X86::CX: DestReg = X86::ECX; break;
4958 case X86::BX: DestReg = X86::EBX; break;
4959 case X86::SI: DestReg = X86::ESI; break;
4960 case X86::DI: DestReg = X86::EDI; break;
4961 case X86::BP: DestReg = X86::EBP; break;
4962 case X86::SP: DestReg = X86::ESP; break;
4963 }
4964 if (DestReg) {
4965 Res.first = DestReg;
4966      Res.second = X86::GR32RegisterClass;
4967 }
Evan Cheng11b0a5d2006-09-08 06:48:29 +00004968 } else if (VT == MVT::i64) {
4969 unsigned DestReg = 0;
4970 switch (Res.first) {
4971 default: break;
4972 case X86::AX: DestReg = X86::RAX; break;
4973 case X86::DX: DestReg = X86::RDX; break;
4974 case X86::CX: DestReg = X86::RCX; break;
4975 case X86::BX: DestReg = X86::RBX; break;
4976 case X86::SI: DestReg = X86::RSI; break;
4977 case X86::DI: DestReg = X86::RDI; break;
4978 case X86::BP: DestReg = X86::RBP; break;
4979 case X86::SP: DestReg = X86::RSP; break;
4980 }
4981 if (DestReg) {
4982 Res.first = DestReg;
4983      Res.second = X86::GR64RegisterClass;
4984 }
Chris Lattner524129d2006-07-31 23:26:50 +00004985 }
4986
4987 return Res;
4988}
4989
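// Worked example of the remapping above (illustrative; not part of the
// original code): a "{ax}" constraint paired with an i32 operand first
// resolves to (X86::AX, GR16) through the generic TargetLowering code and is
// then rewritten to (X86::EAX, GR32), rather than being split into {ax},{dx}.
static bool exampleAxWithI32MapsToEax(const X86TargetLowering &TLI) {
  std::pair<unsigned, const TargetRegisterClass*> Res =
    TLI.getRegForInlineAsmConstraint("{ax}", MVT::i32);
  return Res.first == X86::EAX && Res.second == X86::GR32RegisterClass;
}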