//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ParameterAttributes.h"
using namespace llvm;

X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  bool Fast = false;

  RegInfo = TM.getRegisterInfo();

  // Set up the TargetLowering object.

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i16, MVT::i8 , Expand);
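  // (Expand here means the legalizer emits an explicit truncate of the value
  // followed by a plain store of the narrower type; no truncating store node
  // survives to instruction selection.)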

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Expand);
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSEf64)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Expand);
    else
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);
  // SSE has no i16 to fp conversion, only i32
  if (X86ScalarSSEf32) {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Custom);
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  }

  // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i64  , Custom);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i64  , Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT     , MVT::i64  , Expand);
    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Promote);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BIT_CONVERT    , MVT::f32  , Expand);
    setOperationAction(ISD::BIT_CONVERT    , MVT::i32  , Expand);
  }
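  // (An expanded BIT_CONVERT is lowered through a stack temporary: the value
  // is stored from one register file and reloaded into the other, which is
  // exactly the round trip movd/movq would avoid.)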

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  setOperationAction(ISD::MULHS            , MVT::i8   , Expand);
  setOperationAction(ISD::MULHU            , MVT::i8   , Expand);
  setOperationAction(ISD::SDIV             , MVT::i8   , Expand);
  setOperationAction(ISD::UDIV             , MVT::i8   , Expand);
  setOperationAction(ISD::SREM             , MVT::i8   , Expand);
  setOperationAction(ISD::UREM             , MVT::i8   , Expand);
  setOperationAction(ISD::MULHS            , MVT::i16  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i16  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i16  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i16  , Expand);
  setOperationAction(ISD::SREM             , MVT::i16  , Expand);
  setOperationAction(ISD::UREM             , MVT::i16  , Expand);
  setOperationAction(ISD::MULHS            , MVT::i32  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i32  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i32  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i32  , Expand);
  setOperationAction(ISD::SREM             , MVT::i32  , Expand);
  setOperationAction(ISD::UREM             , MVT::i32  , Expand);
  setOperationAction(ISD::MULHS            , MVT::i64  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i64  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i64  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i64  , Expand);
  setOperationAction(ISD::SREM             , MVT::i64  , Expand);
  setOperationAction(ISD::UREM             , MVT::i64  , Expand);
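  // For example, the IR pair
  //   %q = sdiv i32 %x, %y
  //   %r = srem i32 %x, %y
  // expands to two ISD::SDIVREM nodes that CSE merges into one, matching
  // x86's IDIV (quotient in EAX, remainder in EDX).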

  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
  setOperationAction(ISD::BR_CC            , MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE          , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
  setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
  setOperationAction(ISD::FLT_ROUNDS_      , MVT::i32  , Custom);

  setOperationAction(ISD::CTPOP            , MVT::i8   , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i8   , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i8   , Custom);
  setOperationAction(ISD::CTPOP            , MVT::i16  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i16  , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i16  , Custom);
  setOperationAction(ISD::CTPOP            , MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i32  , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP          , MVT::i64  , Expand);
    setOperationAction(ISD::CTTZ           , MVT::i64  , Custom);
    setOperationAction(ISD::CTLZ           , MVT::i64  , Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
  setOperationAction(ISD::BSWAP            , MVT::i16  , Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT           , MVT::i1   , Promote);
  setOperationAction(ISD::SELECT           , MVT::i8   , Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT           , MVT::i16  , Custom);
  setOperationAction(ISD::SELECT           , MVT::i32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f64  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f80  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i8   , Custom);
  setOperationAction(ISD::SETCC            , MVT::i16  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f64  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f80  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT         , MVT::i64  , Custom);
    setOperationAction(ISD::SETCC          , MVT::i64  , Custom);
  }
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET              , MVT::Other, Custom);
  if (!Subtarget->is64Bit())
    setOperationAction(ISD::EH_RETURN      , MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool     , MVT::i32  , Custom);
  setOperationAction(ISD::JumpTable        , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalAddress    , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalTLSAddress , MVT::i32  , Custom);
  setOperationAction(ISD::ExternalSymbol   , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool   , MVT::i64  , Custom);
    setOperationAction(ISD::JumpTable      , MVT::i64  , Custom);
    setOperationAction(ISD::GlobalAddress  , MVT::i64  , Custom);
    setOperationAction(ISD::ExternalSymbol , MVT::i64  , Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRA_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRL_PARTS        , MVT::i32  , Custom);
  // X86 wants to expand memset / memcpy itself.
  setOperationAction(ISD::MEMSET           , MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY           , MVT::Other, Custom);

  if (!Subtarget->hasSSE2())
    setOperationAction(ISD::MEMBARRIER     , MVT::Other, Expand);

  // Use the default ISD::LOCATION, ISD::DECLARE expansion.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing())
    setOperationAction(ISD::LABEL, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    // FIXME: Verify
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART          , MVT::Other, Custom);
  setOperationAction(ISD::VAARG            , MVT::Other, Expand);
  setOperationAction(ISD::VAEND            , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::VACOPY         , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY         , MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE,         MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,      MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
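  // (Cygwin/MinGW has to probe the stack when growing it by more than a
  // page, so i32 DYNAMIC_STACKALLOC is custom lowered there to route the
  // allocation through the _alloca stack-probe helper.)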

  if (X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f64, Custom);
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f64, Custom);
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN , MVT::f64, Expand);
    setOperationAction(ISD::FCOS , MVT::f64, Expand);
    setOperationAction(ISD::FREM , MVT::f64, Expand);
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);
    setOperationAction(ISD::FREM , MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps

    // Floating truncations from f80 and extensions to f80 go through memory.
    // If optimizing, we lie about this though and handle it in
    // InstructionSelectPreprocess so that dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f64, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }
  } else if (X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);
    setOperationAction(ISD::FREM , MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    // SSE <-> X87 conversions go through memory.  If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f64, Expand);
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      // And x87->x87 truncations also.
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f64 , Expand);
      setOperationAction(ISD::FCOS , MVT::f64 , Expand);
    }
  } else {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);
    setOperationAction(ISD::UNDEF,     MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    // Floating truncations go through memory.  If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f64 , Expand);
      setOperationAction(ISD::FCOS , MVT::f64 , Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // Long double always uses X87.
  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
  setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
  {
    APFloat TmpFlt(+0.0);
    TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt);  // FLD0
    TmpFlt.changeSign();
    addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
    APFloat TmpFlt2(+1.0);
    TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt2);  // FLD1
    TmpFlt2.changeSign();
    addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
  }

  if (!UnsafeFPMath) {
    setOperationAction(ISD::FSIN , MVT::f80 , Expand);
    setOperationAction(ISD::FCOS , MVT::f80 , Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW , MVT::f32 , Expand);
  setOperationAction(ISD::FPOW , MVT::f64 , Expand);
  setOperationAction(ISD::FPOW , MVT::f80 , Expand);

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SHL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRA, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTR, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetics

    setOperationAction(ISD::ADD, MVT::v8i8,  Legal);
    setOperationAction(ISD::ADD, MVT::v4i16, Legal);
    setOperationAction(ISD::ADD, MVT::v2i32, Legal);
    setOperationAction(ISD::ADD, MVT::v1i64, Legal);

    setOperationAction(ISD::SUB, MVT::v8i8,  Legal);
    setOperationAction(ISD::SUB, MVT::v4i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i32, Legal);
    setOperationAction(ISD::SUB, MVT::v1i64, Legal);

    setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
    setOperationAction(ISD::MUL,   MVT::v4i16, Legal);

    setOperationAction(ISD::AND, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::AND, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v4i16, Promote);
    AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v2i32, Promote);
    AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v1i64, Legal);

    setOperationAction(ISD::OR,  MVT::v8i8,  Promote);
    AddPromotedToType (ISD::OR,  MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v4i16, Promote);
    AddPromotedToType (ISD::OR,  MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v2i32, Promote);
    AddPromotedToType (ISD::OR,  MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v1i64, Legal);

    setOperationAction(ISD::XOR, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::XOR, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v1i64, Legal);

    setOperationAction(ISD::LOAD, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::LOAD, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v1i64, Legal);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8,  Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8,  Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8,  Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
  }

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD,               MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB,               MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL,               MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV,               MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT,              MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG,               MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD,               MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD,   MVT::v16i8, Legal);
    setOperationAction(ISD::ADD,   MVT::v8i16, Legal);
    setOperationAction(ISD::ADD,   MVT::v4i32, Legal);
    setOperationAction(ISD::ADD,   MVT::v2i64, Legal);
    setOperationAction(ISD::SUB,   MVT::v16i8, Legal);
    setOperationAction(ISD::SUB,   MVT::v8i16, Legal);
    setOperationAction(ISD::SUB,   MVT::v4i32, Legal);
    setOperationAction(ISD::SUB,   MVT::v2i64, Legal);
    setOperationAction(ISD::MUL,   MVT::v8i16, Legal);
    setOperationAction(ISD::FADD,  MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB,  MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL,  MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV,  MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG,  MVT::v2f64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(MVT::getVectorNumElements(VT)))
        continue;
      setOperationAction(ISD::BUILD_VECTOR,       (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR,     (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR,     (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD,   (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD,   (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD,   MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD,   MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
  }

  if (Subtarget->hasSSE41()) {
    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width.  f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
    }
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::SELECT);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are optimizing for size.
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
  return;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  if (Subtarget->is64Bit())
    return getTargetData()->getABITypeAlignment(Ty);
  unsigned Align = 4;
  if (Subtarget->hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}

/// getPICJumpTableRelocBase - Returns relocation base for the given PIC
/// jumptable.
SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
                                                      SelectionDAG &DAG) const {
  if (usesGlobalOffsetTable())
    return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
  if (!Subtarget->isPICStyleRIPRel())
    return DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy());
  return Table;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

/// GetPossiblePreceedingTailCall - Get the preceding X86ISD::TAILCALL node if
/// it exists, skipping a possible ISD::TokenFactor.
static SDOperand GetPossiblePreceedingTailCall(SDOperand Chain) {
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    return Chain;
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    if (Chain.getNumOperands() &&
        Chain.getOperand(0).getOpcode() == X86ISD::TAILCALL)
      return Chain.getOperand(0);
  }
  return Chain;
}

/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");

  SmallVector<CCValAssign, 16> RVLocs;
  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }
  SDOperand Chain = Op.getOperand(0);

  // Handle tail call return.
  Chain = GetPossiblePreceedingTailCall(Chain);
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    SDOperand TailCall = Chain;
    SDOperand TargetAddress = TailCall.getOperand(1);
    SDOperand StackAdjustment = TailCall.getOperand(2);
    assert(((TargetAddress.getOpcode() == ISD::Register &&
             (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX ||
              cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
            TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
            TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
           "Expecting a global address, external symbol, or register");
    assert(StackAdjustment.getOpcode() == ISD::Constant &&
           "Expecting a const value");

    SmallVector<SDOperand,8> Operands;
    Operands.push_back(Chain.getOperand(0));
    Operands.push_back(TargetAddress);
    Operands.push_back(StackAdjustment);
    // Copy registers used by the call. Last operand is a flag so it is not
    // copied.
    for (unsigned i=3; i < TailCall.getNumOperands()-1; i++) {
      Operands.push_back(Chain.getOperand(i));
    }
    return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0],
                       Operands.size());
  }

  // Regular return.
  SDOperand Flag;

  // Copy the result values into the output registers.
  if (RVLocs.size() != 1 || !RVLocs[0].isRegLoc() ||
      RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      CCValAssign &VA = RVLocs[i];
      assert(VA.isRegLoc() && "Can only return in registers!");
      Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1),
                               Flag);
      Flag = Chain.getValue(1);
    }
  } else {
    // We need to handle a destination of ST0 specially, because it isn't really
    // a register.
    SDOperand Value = Op.getOperand(1);

    // If this is an FP return with ScalarSSE, we need to move the value from
    // an XMM register onto the fp-stack.  Do this with an FP_EXTEND to f80.
    // This will get legalized into a load/store if it can't get optimized away.
    if (isScalarFPTypeInSSEReg(RVLocs[0].getValVT()))
      Value = DAG.getNode(ISD::FP_EXTEND, MVT::f80, Value);

    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
    SDOperand Ops[] = { Chain, Value };
    Chain = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops, 2);
    Flag = Chain.getValue(1);
  }

  SDOperand BytesToPop = DAG.getConstant(getBytesToPopOnReturn(), MVT::i16);
  if (Flag.Val)
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop, Flag);
  else
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop);
}


/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers.  This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered.  This returns an SDNode with the same number of values as the
/// ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
                unsigned CallingConv, SelectionDAG &DAG) {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
  CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);

  SmallVector<SDOperand, 8> ResultVals;

  // Copy all of the result registers out of their specified physreg.
  if (RVLocs.size() != 1 || RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
                                 RVLocs[i].getValVT(), InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      ResultVals.push_back(Chain.getValue(0));
    }
  } else {
    // Copies from the FP stack are special, as ST0 isn't a valid register
    // before the fp stackifier runs.

    // Copy ST0 into an RFP register with FP_GET_RESULT.  If this will end up
    // in an SSE register, copy it out as F80 and do a truncate, otherwise use
    // the specified value type.
    MVT::ValueType GetResultTy = RVLocs[0].getValVT();
    if (isScalarFPTypeInSSEReg(GetResultTy))
      GetResultTy = MVT::f80;
    SDVTList Tys = DAG.getVTList(GetResultTy, MVT::Other, MVT::Flag);

    SDOperand GROps[] = { Chain, InFlag };
    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, GROps, 2);
    Chain  = RetVal.getValue(1);
    InFlag = RetVal.getValue(2);

    // If we want the result in an SSE register, use an FP_TRUNCATE to get it
    // there.
    if (GetResultTy != RVLocs[0].getValVT())
      RetVal = DAG.getNode(ISD::FP_ROUND, RVLocs[0].getValVT(), RetVal,
                           // This truncation won't change the value.
                           DAG.getIntPtrConstant(1));

    ResultVals.push_back(RetVal);
  }

  // Merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
                     &ResultVals[0], ResultVals.size()).Val;
}

/// LowerCallResultToTwo64BitRegs - Lower the result values of an x86-64
/// ISD::CALL where the results are known to be in two 64-bit registers,
/// e.g. XMM0 and XMM1. This simply stores the two values back to the
/// fixed stack slot allocated for StructRet.
SDNode *X86TargetLowering::
LowerCallResultToTwo64BitRegs(SDOperand Chain, SDOperand InFlag,
                              SDNode *TheCall, unsigned Reg1, unsigned Reg2,
                              MVT::ValueType VT, SelectionDAG &DAG) {
  SDOperand RetVal1 = DAG.getCopyFromReg(Chain, Reg1, VT, InFlag);
  Chain = RetVal1.getValue(1);
  InFlag = RetVal1.getValue(2);
  SDOperand RetVal2 = DAG.getCopyFromReg(Chain, Reg2, VT, InFlag);
  Chain = RetVal2.getValue(1);
  InFlag = RetVal2.getValue(2);
  SDOperand FIN = TheCall->getOperand(5);
  Chain = DAG.getStore(Chain, RetVal1, FIN, NULL, 0);
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8));
  Chain = DAG.getStore(Chain, RetVal2, FIN, NULL, 0);
  return Chain.Val;
}

/// LowerCallResultToTwoX87Regs - Lower the result values of an x86-64 ISD::CALL
/// where the results are known to be in ST0 and ST1.
SDNode *X86TargetLowering::
LowerCallResultToTwoX87Regs(SDOperand Chain, SDOperand InFlag,
                            SDNode *TheCall, SelectionDAG &DAG) {
  SmallVector<SDOperand, 8> ResultVals;
  const MVT::ValueType VTs[] = { MVT::f80, MVT::f80, MVT::Other, MVT::Flag };
  SDVTList Tys = DAG.getVTList(VTs, 4);
  SDOperand Ops[] = { Chain, InFlag };
  SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT2, Tys, Ops, 2);
  Chain = RetVal.getValue(2);
  SDOperand FIN = TheCall->getOperand(5);
  Chain = DAG.getStore(Chain, RetVal.getValue(1), FIN, NULL, 0);
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(16));
  Chain = DAG.getStore(Chain, RetVal, FIN, NULL, 0);
  return Chain.Val;
}

//===----------------------------------------------------------------------===//
//                C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  StdCall calling convention seems to be the standard for many Windows API
//  routines and elsewhere. It differs from the C calling convention just a
//  little: the callee should clean up the stack, not the caller. Symbols
//  should also be decorated in some fancy way :) It doesn't support any
//  vector arguments.
//  For info on the fast calling convention, see the Fast Calling Convention
//  (tail call) implementation LowerX86_32FastCCCallTo.

/// AddLiveIn - This helper function adds the specified physical register to the
/// MachineFunction as a live in value.  It also creates a corresponding virtual
/// register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          const TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}

// Determines whether a CALL node uses struct return semantics.
static bool CallIsStructReturn(SDOperand Op) {
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;
  if (!NumOps)
    return false;

  ConstantSDNode *Flags = cast<ConstantSDNode>(Op.getOperand(6));
  return Flags->getValue() & ISD::ParamFlags::StructReturn;
}

// Determines whether a FORMAL_ARGUMENTS node uses struct return semantics.
static bool ArgsAreStructReturn(SDOperand Op) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  if (!NumArgs)
    return false;

  ConstantSDNode *Flags = cast<ConstantSDNode>(Op.getOperand(3));
  return Flags->getValue() & ISD::ParamFlags::StructReturn;
}

// Determines whether a CALL or FORMAL_ARGUMENTS node requires the callee to pop
// its own arguments. Callee pop is necessary to support tail calls.
bool X86TargetLowering::IsCalleePop(SDOperand Op) {
  bool IsVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (IsVarArg)
    return false;

  switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
  default:
    return false;
  case CallingConv::X86_StdCall:
    return !Subtarget->is64Bit();
  case CallingConv::X86_FastCall:
    return !Subtarget->is64Bit();
  case CallingConv::Fast:
    return PerformTailCallOpt;
  }
}

// Selects the correct CCAssignFn for a CALL or FORMAL_ARGUMENTS node.
CCAssignFn *X86TargetLowering::CCAssignFnForNode(SDOperand Op) const {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();

  if (Subtarget->is64Bit())
    if (CC == CallingConv::Fast && PerformTailCallOpt)
      return CC_X86_64_TailCall;
    else
      return CC_X86_64_C;

  if (CC == CallingConv::X86_FastCall)
    return CC_X86_32_FastCall;
  else if (CC == CallingConv::Fast && PerformTailCallOpt)
    return CC_X86_32_TailCall;
  else
    return CC_X86_32_C;
}

// Selects the appropriate decoration to apply to a MachineFunction containing a
// given FORMAL_ARGUMENTS node.
NameDecorationStyle
X86TargetLowering::NameDecorationForFORMAL_ARGUMENTS(SDOperand Op) {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  if (CC == CallingConv::X86_FastCall)
    return FastCall;
  else if (CC == CallingConv::X86_StdCall)
    return StdCall;
  return None;
}

// IsPossiblyOverwrittenArgumentOfTailCall - Check if the operand could possibly
// be overwritten when lowering the outgoing arguments in a tail call. Currently
// the implementation of this call is very conservative and assumes all
// arguments sourced from FORMAL_ARGUMENTS or a CopyFromReg with virtual
// registers would be overwritten by direct lowering.
// Possible improvement:
// Check FORMAL_ARGUMENTS' corresponding MERGE_VALUES for CopyFromReg nodes
// indicating inreg passed arguments which also need not be lowered to a safe
// stack slot.
static bool IsPossiblyOverwrittenArgumentOfTailCall(SDOperand Op) {
  RegisterSDNode *OpReg = NULL;
  if (Op.getOpcode() == ISD::FORMAL_ARGUMENTS ||
      (Op.getOpcode() == ISD::CopyFromReg &&
       (OpReg = cast<RegisterSDNode>(Op.getOperand(1))) &&
       OpReg->getReg() >= TargetRegisterInfo::FirstVirtualRegister))
    return true;
  return false;
}

// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
// by "Src" to address "Dst" with size and alignment information specified by
// the specific parameter attribute. The copy will be passed as a byval function
// parameter.
static SDOperand
CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain,
                          unsigned Flags, SelectionDAG &DAG) {
  unsigned Align = 1 <<
    ((Flags & ISD::ParamFlags::ByValAlign) >> ISD::ParamFlags::ByValAlignOffs);
  unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
    ISD::ParamFlags::ByValSizeOffs;
  SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
  SDOperand SizeNode  = DAG.getConstant(Size, MVT::i32);
  SDOperand AlwaysInline = DAG.getConstant(1, MVT::i32);
  return DAG.getMemcpy(Chain, Dst, Src, SizeNode, AlignNode, AlwaysInline);
}
1094
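// Editor's note: an illustrative sketch, not part of the original lowering
// code. It restates how CreateCopyOfByValArgument above unpacks the parameter
// flag word: the byval alignment is stored as a log2 value and the byval size
// as a plain byte count. The helper names below are hypothetical.
static inline unsigned ExampleByValAlign(unsigned Flags) {
  // The alignment field holds log2(alignment); shifting the field down and
  // exponentiating recovers the alignment in bytes.
  return 1 << ((Flags & ISD::ParamFlags::ByValAlign) >>
               ISD::ParamFlags::ByValAlignOffs);
}
static inline unsigned ExampleByValSize(unsigned Flags) {
  // The size field holds the aggregate size in bytes directly.
  return (Flags & ISD::ParamFlags::ByValSize) >> ISD::ParamFlags::ByValSizeOffs;
}
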
Rafael Espindola03cbeb72007-09-14 15:48:13 +00001095SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
1096 const CCValAssign &VA,
1097 MachineFrameInfo *MFI,
1098 SDOperand Root, unsigned i) {
1099 // Create the nodes corresponding to a load from this parameter slot.
Evan Cheng3e42a522008-01-10 02:24:25 +00001100 unsigned Flags = cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();
1101 bool isByVal = Flags & ISD::ParamFlags::ByVal;
1102
1103 // FIXME: For now, all byval parameter objects are marked mutable. This
1104 // can be changed with more analysis.
Rafael Espindola03cbeb72007-09-14 15:48:13 +00001105 int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
Evan Cheng3e42a522008-01-10 02:24:25 +00001106 VA.getLocMemOffset(), !isByVal);
Rafael Espindola03cbeb72007-09-14 15:48:13 +00001107 SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
Evan Cheng3e42a522008-01-10 02:24:25 +00001108 if (isByVal)
Rafael Espindola03cbeb72007-09-14 15:48:13 +00001109 return FIN;
Dan Gohman12a9c082008-02-06 22:27:42 +00001110 return DAG.getLoad(VA.getValVT(), Root, FIN,
Dan Gohmanfb020b62008-02-07 18:41:25 +00001111 PseudoSourceValue::getFixedStack(), FI);
Rafael Espindola03cbeb72007-09-14 15:48:13 +00001112}
1113
Gordon Henriksen18ace102008-01-05 16:56:59 +00001114SDOperand
1115X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001116 MachineFunction &MF = DAG.getMachineFunction();
Gordon Henriksen18ace102008-01-05 16:56:59 +00001117 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
1118
1119 const Function* Fn = MF.getFunction();
1120 if (Fn->hasExternalLinkage() &&
1121 Subtarget->isTargetCygMing() &&
1122 Fn->getName() == "main")
1123 FuncInfo->setForceFramePointer(true);
1124
1125 // Decorate the function name.
1126 FuncInfo->setDecorationStyle(NameDecorationForFORMAL_ARGUMENTS(Op));
1127
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001128 MachineFrameInfo *MFI = MF.getFrameInfo();
1129 SDOperand Root = Op.getOperand(0);
1130 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00001131 unsigned CC = MF.getFunction()->getCallingConv();
Gordon Henriksen18ace102008-01-05 16:56:59 +00001132 bool Is64Bit = Subtarget->is64Bit();
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001133
1134 assert(!(isVarArg && CC == CallingConv::Fast) &&
1135 "Var args not supported with calling convention fastcc");
1136
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001137 // Assign locations to all of the incoming arguments.
1138 SmallVector<CCValAssign, 16> ArgLocs;
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001139 CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
Gordon Henriksen18ace102008-01-05 16:56:59 +00001140 CCInfo.AnalyzeFormalArguments(Op.Val, CCAssignFnForNode(Op));
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00001141
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001142 SmallVector<SDOperand, 8> ArgValues;
1143 unsigned LastVal = ~0U;
1144 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1145 CCValAssign &VA = ArgLocs[i];
1146 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
1147 // places.
1148 assert(VA.getValNo() != LastVal &&
1149 "Don't support value assigned to multiple locs yet");
1150 LastVal = VA.getValNo();
1151
1152 if (VA.isRegLoc()) {
1153 MVT::ValueType RegVT = VA.getLocVT();
1154 TargetRegisterClass *RC;
1155 if (RegVT == MVT::i32)
1156 RC = X86::GR32RegisterClass;
Gordon Henriksen18ace102008-01-05 16:56:59 +00001157 else if (Is64Bit && RegVT == MVT::i64)
1158 RC = X86::GR64RegisterClass;
Dale Johannesen51552f62008-02-05 20:46:33 +00001159 else if (RegVT == MVT::f32)
Gordon Henriksen18ace102008-01-05 16:56:59 +00001160 RC = X86::FR32RegisterClass;
Dale Johannesen51552f62008-02-05 20:46:33 +00001161 else if (RegVT == MVT::f64)
Gordon Henriksen18ace102008-01-05 16:56:59 +00001162 RC = X86::FR64RegisterClass;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001163 else {
1164 assert(MVT::isVector(RegVT));
Gordon Henriksen18ace102008-01-05 16:56:59 +00001165 if (Is64Bit && MVT::getSizeInBits(RegVT) == 64) {
1166 RC = X86::GR64RegisterClass; // MMX values are passed in GPRs.
1167 RegVT = MVT::i64;
1168 } else
1169 RC = X86::VR128RegisterClass;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001170 }
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001171
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001172 unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
1173 SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);
1174
1175 // If this is an 8 or 16-bit value, it is really passed promoted to 32
1176 // bits. Insert an assert[sz]ext to capture this, then truncate to the
1177 // right size.
1178 if (VA.getLocInfo() == CCValAssign::SExt)
1179 ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
1180 DAG.getValueType(VA.getValVT()));
1181 else if (VA.getLocInfo() == CCValAssign::ZExt)
1182 ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
1183 DAG.getValueType(VA.getValVT()));
1184
1185 if (VA.getLocInfo() != CCValAssign::Full)
1186 ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);
1187
Gordon Henriksen18ace102008-01-05 16:56:59 +00001188 // Handle MMX values passed in GPRs.
1189 if (Is64Bit && RegVT != VA.getLocVT() && RC == X86::GR64RegisterClass &&
1190 MVT::getSizeInBits(RegVT) == 64)
1191 ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);
1192
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001193 ArgValues.push_back(ArgValue);
1194 } else {
1195 assert(VA.isMemLoc());
Rafael Espindola03cbeb72007-09-14 15:48:13 +00001196 ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001197 }
1198 }
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001199
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001200 unsigned StackSize = CCInfo.getNextStackOffset();
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00001201 // align stack specially for tail calls
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001202 if (CC == CallingConv::Fast)
1203 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001204
 1205 // If the function takes a variable number of arguments, make a frame index for
1206 // the start of the first vararg value... for expansion of llvm.va_start.
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001207 if (isVarArg) {
Gordon Henriksen18ace102008-01-05 16:56:59 +00001208 if (Is64Bit || CC != CallingConv::X86_FastCall) {
1209 VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
1210 }
1211 if (Is64Bit) {
1212 static const unsigned GPR64ArgRegs[] = {
1213 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
1214 };
1215 static const unsigned XMMArgRegs[] = {
1216 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
1217 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
1218 };
1219
1220 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6);
1221 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
1222
1223 // For X86-64, if there are vararg parameters that are passed via
1224 // registers, then we must store them to their spots on the stack so they
 1225 // may be loaded by dereferencing the result of va_next.
1226 VarArgsGPOffset = NumIntRegs * 8;
1227 VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
1228 RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);
1229
1230 // Store the integer parameter registers.
1231 SmallVector<SDOperand, 8> MemOps;
1232 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
1233 SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
Chris Lattner5872a362008-01-17 07:00:52 +00001234 DAG.getIntPtrConstant(VarArgsGPOffset));
Gordon Henriksen18ace102008-01-05 16:56:59 +00001235 for (; NumIntRegs != 6; ++NumIntRegs) {
1236 unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
1237 X86::GR64RegisterClass);
1238 SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
Dan Gohman12a9c082008-02-06 22:27:42 +00001239 SDOperand Store =
1240 DAG.getStore(Val.getValue(1), Val, FIN,
Dan Gohmanfb020b62008-02-07 18:41:25 +00001241 PseudoSourceValue::getFixedStack(),
Dan Gohman12a9c082008-02-06 22:27:42 +00001242 RegSaveFrameIndex);
Gordon Henriksen18ace102008-01-05 16:56:59 +00001243 MemOps.push_back(Store);
1244 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
Chris Lattner5872a362008-01-17 07:00:52 +00001245 DAG.getIntPtrConstant(8));
Gordon Henriksen18ace102008-01-05 16:56:59 +00001246 }
1247
1248 // Now store the XMM (fp + vector) parameter registers.
1249 FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
Chris Lattner5872a362008-01-17 07:00:52 +00001250 DAG.getIntPtrConstant(VarArgsFPOffset));
Gordon Henriksen18ace102008-01-05 16:56:59 +00001251 for (; NumXMMRegs != 8; ++NumXMMRegs) {
1252 unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
1253 X86::VR128RegisterClass);
1254 SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
Dan Gohman12a9c082008-02-06 22:27:42 +00001255 SDOperand Store =
1256 DAG.getStore(Val.getValue(1), Val, FIN,
Dan Gohmanfb020b62008-02-07 18:41:25 +00001257 PseudoSourceValue::getFixedStack(),
Dan Gohman12a9c082008-02-06 22:27:42 +00001258 RegSaveFrameIndex);
Gordon Henriksen18ace102008-01-05 16:56:59 +00001259 MemOps.push_back(Store);
1260 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
Chris Lattner5872a362008-01-17 07:00:52 +00001261 DAG.getIntPtrConstant(16));
Gordon Henriksen18ace102008-01-05 16:56:59 +00001262 }
1263 if (!MemOps.empty())
1264 Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
1265 &MemOps[0], MemOps.size());
1266 }
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001267 }
Gordon Henriksen18ace102008-01-05 16:56:59 +00001268
 1269 // Make sure the argument area takes 8n+4 bytes so that the start of the
 1270 // arguments, and the arguments after the retaddr has been pushed, are
 1271 // aligned.
1272 if (!Is64Bit && CC == CallingConv::X86_FastCall &&
1273 !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() &&
1274 (StackSize & 7) == 0)
1275 StackSize += 4;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001276
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001277 ArgValues.push_back(Root);
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00001278
Gordon Henriksen18ace102008-01-05 16:56:59 +00001279 // Some CCs need callee pop.
1280 if (IsCalleePop(Op)) {
1281 BytesToPopOnReturn = StackSize; // Callee pops everything.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001282 BytesCallerReserves = 0;
1283 } else {
1284 BytesToPopOnReturn = 0; // Callee pops nothing.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001285 // If this is an sret function, the return should pop the hidden pointer.
Gordon Henriksen18ace102008-01-05 16:56:59 +00001286 if (!Is64Bit && ArgsAreStructReturn(Op))
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001287 BytesToPopOnReturn = 4;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001288 BytesCallerReserves = StackSize;
1289 }
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001290
Gordon Henriksen18ace102008-01-05 16:56:59 +00001291 if (!Is64Bit) {
1292 RegSaveFrameIndex = 0xAAAAAAA; // RegSaveFrameIndex is X86-64 only.
1293 if (CC == CallingConv::X86_FastCall)
 1294 VarArgsFrameIndex = 0xAAAAAAA; // fastcall functions can't have varargs.
1295 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001296
Anton Korobeynikove844e472007-08-15 17:12:32 +00001297 FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001298
1299 // Return the new list of results.
1300 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
1301 &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
1302}
1303
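// Editor's note: a worked example, not from the original source, of the
// varargs register save area set up in LowerFORMAL_ARGUMENTS above. For a
// hypothetical x86-64 variadic function f(int a, double b, ...), the named
// arguments consume one GPR (for a) and one XMM register (for b), so
// NumIntRegs == 1 and NumXMMRegs == 1. That yields VarArgsGPOffset == 1 * 8 == 8
// and VarArgsFPOffset == 6 * 8 + 1 * 16 == 64 within the 6 * 8 + 8 * 16 == 176
// byte register save area created on the stack.
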
Evan Chengbc077bf2008-01-10 00:09:10 +00001304SDOperand
1305X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
1306 const SDOperand &StackPtr,
1307 const CCValAssign &VA,
1308 SDOperand Chain,
1309 SDOperand Arg) {
Dan Gohman1190f3a2008-02-07 16:28:05 +00001310 unsigned LocMemOffset = VA.getLocMemOffset();
1311 SDOperand PtrOff = DAG.getIntPtrConstant(LocMemOffset);
Evan Chengbc077bf2008-01-10 00:09:10 +00001312 PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
1313 SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
1314 unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();
1315 if (Flags & ISD::ParamFlags::ByVal) {
Evan Cheng5817a0e2008-01-12 01:08:07 +00001316 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG);
Evan Chengbc077bf2008-01-10 00:09:10 +00001317 }
Dan Gohman1190f3a2008-02-07 16:28:05 +00001318 return DAG.getStore(Chain, Arg, PtrOff,
Dan Gohmanfb020b62008-02-07 18:41:25 +00001319 PseudoSourceValue::getStack(), LocMemOffset);
Evan Chengbc077bf2008-01-10 00:09:10 +00001320}
1321
Evan Cheng931a8f42008-01-29 19:34:22 +00001322/// ClassifyX86_64SRetCallReturn - Classify how to implement an x86-64
 1323/// struct return call to the specified function. The X86-64 ABI specifies that
 1324/// some SRet calls are actually returned in registers. Since current
 1325/// LLVM cannot represent multi-value calls, they are represented as
1326/// calls where the results are passed in a hidden struct provided by
1327/// the caller. This function examines the type of the struct to
1328/// determine the correct way to implement the call.
1329X86::X86_64SRet
1330X86TargetLowering::ClassifyX86_64SRetCallReturn(const Function *Fn) {
1331 // FIXME: Disabled for now.
1332 return X86::InMemory;
1333
1334 const PointerType *PTy = cast<PointerType>(Fn->arg_begin()->getType());
1335 const Type *RTy = PTy->getElementType();
1336 unsigned Size = getTargetData()->getABITypeSize(RTy);
1337 if (Size != 16 && Size != 32)
1338 return X86::InMemory;
1339
1340 if (Size == 32) {
1341 const StructType *STy = dyn_cast<StructType>(RTy);
1342 if (!STy) return X86::InMemory;
1343 if (STy->getNumElements() == 2 &&
1344 STy->getElementType(0) == Type::X86_FP80Ty &&
1345 STy->getElementType(1) == Type::X86_FP80Ty)
1346 return X86::InX87;
1347 }
1348
1349 bool AllFP = true;
1350 for (Type::subtype_iterator I = RTy->subtype_begin(), E = RTy->subtype_end();
1351 I != E; ++I) {
1352 const Type *STy = I->get();
1353 if (!STy->isFPOrFPVector()) {
1354 AllFP = false;
1355 break;
1356 }
1357 }
1358
1359 if (AllFP)
1360 return X86::InSSE;
1361 return X86::InGPR64;
1362}
1363
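// Editor's note: illustrative classifications, not from the original source,
// for the rules in ClassifyX86_64SRetCallReturn above (ignoring the early
// "FIXME: Disabled for now" return). A 32-byte sret struct of two x86_fp80
// members would classify as X86::InX87; a 16-byte struct of two doubles, being
// all floating point, as X86::InSSE; a 16-byte struct of two i64 members as
// X86::InGPR64; and any sret type whose size is neither 16 nor 32 bytes stays
// X86::InMemory.
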
1364void X86TargetLowering::X86_64AnalyzeSRetCallOperands(SDNode *TheCall,
1365 CCAssignFn *Fn,
1366 CCState &CCInfo) {
1367 unsigned NumOps = (TheCall->getNumOperands() - 5) / 2;
1368 for (unsigned i = 1; i != NumOps; ++i) {
1369 MVT::ValueType ArgVT = TheCall->getOperand(5+2*i).getValueType();
1370 SDOperand FlagOp = TheCall->getOperand(5+2*i+1);
 1371 unsigned ArgFlags = cast<ConstantSDNode>(FlagOp)->getValue();
1372 if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo)) {
1373 cerr << "Call operand #" << i << " has unhandled type "
1374 << MVT::getValueTypeString(ArgVT) << "\n";
1375 abort();
1376 }
1377 }
1378}
1379
Gordon Henriksen18ace102008-01-05 16:56:59 +00001380SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
1381 MachineFunction &MF = DAG.getMachineFunction();
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001382 SDOperand Chain = Op.getOperand(0);
Gordon Henriksen18ace102008-01-05 16:56:59 +00001383 unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001384 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
Gordon Henriksen18ace102008-01-05 16:56:59 +00001385 bool IsTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0
1386 && CC == CallingConv::Fast && PerformTailCallOpt;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001387 SDOperand Callee = Op.getOperand(4);
Gordon Henriksen18ace102008-01-05 16:56:59 +00001388 bool Is64Bit = Subtarget->is64Bit();
Evan Cheng931a8f42008-01-29 19:34:22 +00001389 bool IsStructRet = CallIsStructReturn(Op);
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001390
1391 assert(!(isVarArg && CC == CallingConv::Fast) &&
1392 "Var args not supported with calling convention fastcc");
1393
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001394 // Analyze operands of the call, assigning locations to each operand.
1395 SmallVector<CCValAssign, 16> ArgLocs;
1396 CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
Evan Cheng931a8f42008-01-29 19:34:22 +00001397 CCAssignFn *CCFn = CCAssignFnForNode(Op);
1398
1399 X86::X86_64SRet SRetMethod = X86::InMemory;
1400 if (Is64Bit && IsStructRet)
1401 // FIXME: We can't figure out type of the sret structure for indirect
1402 // calls. We need to copy more information from CallSite to the ISD::CALL
1403 // node.
1404 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1405 SRetMethod =
1406 ClassifyX86_64SRetCallReturn(dyn_cast<Function>(G->getGlobal()));
1407
 1408 // UGLY HACK! For x86-64, some 128-bit aggregates are returned in a pair of
 1409 // registers. Unfortunately, LLVM does not support i128 yet, so we pretend it's
1410 // a sret call.
1411 if (SRetMethod != X86::InMemory)
1412 X86_64AnalyzeSRetCallOperands(Op.Val, CCFn, CCInfo);
1413 else
1414 CCInfo.AnalyzeCallOperands(Op.Val, CCFn);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001415
1416 // Get a count of how many bytes are to be pushed on the stack.
1417 unsigned NumBytes = CCInfo.getNextStackOffset();
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001418 if (CC == CallingConv::Fast)
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00001419 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001420
Gordon Henriksen18ace102008-01-05 16:56:59 +00001421 // Make sure the argument area takes 8n+4 bytes so that the start of the
 1422 // arguments, and the arguments after the retaddr has been pushed, are aligned.
1423 if (!Is64Bit && CC == CallingConv::X86_FastCall &&
1424 !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() &&
1425 (NumBytes & 7) == 0)
1426 NumBytes += 4;
1427
1428 int FPDiff = 0;
1429 if (IsTailCall) {
1430 // Lower arguments at fp - stackoffset + fpdiff.
1431 unsigned NumBytesCallerPushed =
1432 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
1433 FPDiff = NumBytesCallerPushed - NumBytes;
1434
1435 // Set the delta of movement of the returnaddr stackslot.
1436 // But only set if delta is greater than previous delta.
1437 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
1438 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);
1439 }
1440
Chris Lattner5872a362008-01-17 07:00:52 +00001441 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001442
Gordon Henriksen18ace102008-01-05 16:56:59 +00001443 SDOperand RetAddrFrIdx, NewRetAddrFrIdx;
1444 if (IsTailCall) {
1445 // Adjust the Return address stack slot.
1446 if (FPDiff) {
1447 MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32;
1448 RetAddrFrIdx = getReturnAddressFrameIndex(DAG);
1449 // Load the "old" Return address.
1450 RetAddrFrIdx =
 1451 DAG.getLoad(VT, Chain, RetAddrFrIdx, NULL, 0);
1452 // Calculate the new stack slot for the return address.
1453 int SlotSize = Is64Bit ? 8 : 4;
1454 int NewReturnAddrFI =
1455 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
1456 NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
1457 Chain = SDOperand(RetAddrFrIdx.Val, 1);
1458 }
1459 }
1460
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001461 SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
1462 SmallVector<SDOperand, 8> MemOpChains;
1463
1464 SDOperand StackPtr;
1465
Arnold Schwaighofer449b01a2008-01-11 16:49:42 +00001466 // Walk the register/memloc assignments, inserting copies/loads. For tail
 1467 // calls, lower arguments that might otherwise be overwritten to the
 1468 // stack slot where they would go on a normal function call.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001469 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1470 CCValAssign &VA = ArgLocs[i];
1471 SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
1472
1473 // Promote the value if needed.
1474 switch (VA.getLocInfo()) {
1475 default: assert(0 && "Unknown loc info!");
1476 case CCValAssign::Full: break;
1477 case CCValAssign::SExt:
1478 Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
1479 break;
1480 case CCValAssign::ZExt:
1481 Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
1482 break;
1483 case CCValAssign::AExt:
1484 Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
1485 break;
1486 }
1487
1488 if (VA.isRegLoc()) {
1489 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1490 } else {
Arnold Schwaighofer0e3c27e2008-01-11 17:10:15 +00001491 if (!IsTailCall || IsPossiblyOverwrittenArgumentOfTailCall(Arg)) {
Arnold Schwaighofer449b01a2008-01-11 16:49:42 +00001492 assert(VA.isMemLoc());
1493 if (StackPtr.Val == 0)
1494 StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy());
1495
1496 MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
1497 Arg));
1498 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001499 }
1500 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001501
1502 if (!MemOpChains.empty())
1503 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
1504 &MemOpChains[0], MemOpChains.size());
1505
1506 // Build a sequence of copy-to-reg nodes chained together with token chain
1507 // and flag operands which copy the outgoing args into registers.
1508 SDOperand InFlag;
1509 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1510 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
1511 InFlag);
1512 InFlag = Chain.getValue(1);
1513 }
Gordon Henriksen18ace102008-01-05 16:56:59 +00001514
1515 if (IsTailCall)
1516 InFlag = SDOperand(); // ??? Isn't this nuking the preceding loop's output?
1517
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001518 // ELF / PIC requires the GOT pointer to be in the EBX register before
 1519 // function calls via the PLT.
Gordon Henriksen18ace102008-01-05 16:56:59 +00001520 // This does not work with tail calls, since EBX is not restored correctly by
 1521 // the tail caller. TODO: at least for x86 - verify for x86-64.
1522 if (!IsTailCall && !Is64Bit &&
1523 getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001524 Subtarget->isPICStyleGOT()) {
1525 Chain = DAG.getCopyToReg(Chain, X86::EBX,
1526 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
1527 InFlag);
1528 InFlag = Chain.getValue(1);
1529 }
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001530
Gordon Henriksen18ace102008-01-05 16:56:59 +00001531 if (Is64Bit && isVarArg) {
1532 // From AMD64 ABI document:
1533 // For calls that may call functions that use varargs or stdargs
1534 // (prototype-less calls or calls to functions containing ellipsis (...) in
1535 // the declaration) %al is used as hidden argument to specify the number
1536 // of SSE registers used. The contents of %al do not need to match exactly
 1537 // the number of registers, but must be an upper bound on the number of SSE
1538 // registers used and is in the range 0 - 8 inclusive.
1539
1540 // Count the number of XMM registers allocated.
1541 static const unsigned XMMArgRegs[] = {
1542 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
1543 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
1544 };
1545 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
1546
1547 Chain = DAG.getCopyToReg(Chain, X86::AL,
1548 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
1549 InFlag = Chain.getValue(1);
1550 }
1551
Arnold Schwaighofer449b01a2008-01-11 16:49:42 +00001552 // For tail calls lower the arguments to the 'real' stack slot.
Gordon Henriksen18ace102008-01-05 16:56:59 +00001553 if (IsTailCall) {
1554 SmallVector<SDOperand, 8> MemOpChains2;
Gordon Henriksen18ace102008-01-05 16:56:59 +00001555 SDOperand FIN;
1556 int FI = 0;
1557 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1558 CCValAssign &VA = ArgLocs[i];
1559 if (!VA.isRegLoc()) {
Arnold Schwaighofer449b01a2008-01-11 16:49:42 +00001560 assert(VA.isMemLoc());
1561 SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
Gordon Henriksen18ace102008-01-05 16:56:59 +00001562 SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
1563 unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();
Gordon Henriksen18ace102008-01-05 16:56:59 +00001564 // Create frame index.
1565 int32_t Offset = VA.getLocMemOffset()+FPDiff;
1566 uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8;
1567 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
1568 FIN = DAG.getFrameIndex(FI, MVT::i32);
Arnold Schwaighofer449b01a2008-01-11 16:49:42 +00001569 SDOperand Source = Arg;
Evan Cheng5817a0e2008-01-12 01:08:07 +00001570 if (IsPossiblyOverwrittenArgumentOfTailCall(Arg)) {
Arnold Schwaighofer449b01a2008-01-11 16:49:42 +00001571 // Copy from the caller's stack slots to the stack slots of the tail-called
 1572 // function. This needs to be done because if we lowered the arguments
 1573 // directly to their real stack slots, the arguments might overwrite each other.
1574 // Get source stack slot.
Chris Lattner5872a362008-01-17 07:00:52 +00001575 Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
Arnold Schwaighofer449b01a2008-01-11 16:49:42 +00001576 if (StackPtr.Val == 0)
1577 StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy());
1578 Source = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, Source);
1579 if ((Flags & ISD::ParamFlags::ByVal)==0)
Duncan Sands22981632008-01-13 21:20:29 +00001580 Source = DAG.getLoad(VA.getValVT(), Chain, Source, NULL, 0);
Arnold Schwaighofer449b01a2008-01-11 16:49:42 +00001581 }
1582
Gordon Henriksen18ace102008-01-05 16:56:59 +00001583 if (Flags & ISD::ParamFlags::ByVal) {
Evan Cheng5817a0e2008-01-12 01:08:07 +00001584 // Copy relative to framepointer.
1585 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, Chain,
1586 Flags, DAG));
Gordon Henriksen18ace102008-01-05 16:56:59 +00001587 } else {
Evan Cheng5817a0e2008-01-12 01:08:07 +00001588 // Store relative to framepointer.
Dan Gohman12a9c082008-02-06 22:27:42 +00001589 MemOpChains2.push_back(
1590 DAG.getStore(Chain, Source, FIN,
Dan Gohmanfb020b62008-02-07 18:41:25 +00001591 PseudoSourceValue::getFixedStack(), FI));
Arnold Schwaighofer449b01a2008-01-11 16:49:42 +00001592 }
Gordon Henriksen18ace102008-01-05 16:56:59 +00001593 }
1594 }
1595
1596 if (!MemOpChains2.empty())
1597 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
Arnold Schwaighoferdfb21302008-01-11 14:34:56 +00001598 &MemOpChains2[0], MemOpChains2.size());
Gordon Henriksen18ace102008-01-05 16:56:59 +00001599
1600 // Store the return address to the appropriate stack slot.
1601 if (FPDiff)
 1602 Chain = DAG.getStore(Chain, RetAddrFrIdx, NewRetAddrFrIdx, NULL, 0);
1603 }
1604
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001605 // If the callee is a GlobalAddress node (quite common, every direct call is)
1606 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1607 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1608 // We should use extra load for direct calls to dllimported functions in
1609 // non-JIT mode.
Gordon Henriksen18ace102008-01-05 16:56:59 +00001610 if ((IsTailCall || !Is64Bit ||
1611 getTargetMachine().getCodeModel() != CodeModel::Large)
1612 && !Subtarget->GVRequiresExtraLoad(G->getGlobal(),
1613 getTargetMachine(), true))
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001614 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001615 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
Gordon Henriksen18ace102008-01-05 16:56:59 +00001616 if (IsTailCall || !Is64Bit ||
1617 getTargetMachine().getCodeModel() != CodeModel::Large)
1618 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
1619 } else if (IsTailCall) {
1620 assert(Callee.getOpcode() == ISD::LOAD &&
1621 "Function destination must be loaded into virtual register");
1622 unsigned Opc = Is64Bit ? X86::R9 : X86::ECX;
1623
1624 Chain = DAG.getCopyToReg(Chain,
 1625 DAG.getRegister(Opc, getPointerTy()),
 1626 Callee, InFlag);
1627 Callee = DAG.getRegister(Opc, getPointerTy());
1628 // Add register as live out.
1629 DAG.getMachineFunction().getRegInfo().addLiveOut(Opc);
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001630 }
1631
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001632 // Returns a chain & a flag for retval copy to use.
1633 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
1634 SmallVector<SDOperand, 8> Ops;
Gordon Henriksen18ace102008-01-05 16:56:59 +00001635
1636 if (IsTailCall) {
1637 Ops.push_back(Chain);
Chris Lattner5872a362008-01-17 07:00:52 +00001638 Ops.push_back(DAG.getIntPtrConstant(NumBytes));
1639 Ops.push_back(DAG.getIntPtrConstant(0));
Gordon Henriksen18ace102008-01-05 16:56:59 +00001640 if (InFlag.Val)
1641 Ops.push_back(InFlag);
1642 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
1643 InFlag = Chain.getValue(1);
1644
1645 // Returns a chain & a flag for retval copy to use.
1646 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
1647 Ops.clear();
1648 }
1649
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001650 Ops.push_back(Chain);
1651 Ops.push_back(Callee);
1652
Gordon Henriksen18ace102008-01-05 16:56:59 +00001653 if (IsTailCall)
1654 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001655
1656 // Add an implicit use GOT pointer in EBX.
Gordon Henriksen18ace102008-01-05 16:56:59 +00001657 if (!IsTailCall && !Is64Bit &&
1658 getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001659 Subtarget->isPICStyleGOT())
1660 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001661
Gordon Henriksen18ace102008-01-05 16:56:59 +00001662 // Add argument registers to the end of the list so that they are known live
1663 // into the call.
Evan Chenge14fc242008-01-07 23:08:23 +00001664 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1665 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1666 RegsToPass[i].second.getValueType()));
Gordon Henriksen18ace102008-01-05 16:56:59 +00001667
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001668 if (InFlag.Val)
1669 Ops.push_back(InFlag);
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001670
Gordon Henriksen18ace102008-01-05 16:56:59 +00001671 if (IsTailCall) {
1672 assert(InFlag.Val &&
1673 "Flag must be set. Depend on flag being set in LowerRET");
1674 Chain = DAG.getNode(X86ISD::TAILCALL,
1675 Op.Val->getVTList(), &Ops[0], Ops.size());
1676
1677 return SDOperand(Chain.Val, Op.ResNo);
1678 }
1679
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00001680 Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size());
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001681 InFlag = Chain.getValue(1);
1682
1683 // Create the CALLSEQ_END node.
Gordon Henriksen18ace102008-01-05 16:56:59 +00001684 unsigned NumBytesForCalleeToPush;
1685 if (IsCalleePop(Op))
1686 NumBytesForCalleeToPush = NumBytes; // Callee pops everything
Evan Cheng931a8f42008-01-29 19:34:22 +00001687 else if (!Is64Bit && IsStructRet)
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001688 // If this is a call to a struct-return function, the callee
1689 // pops the hidden struct pointer, so we have to push it back.
1690 // This is common for Darwin/X86, Linux & Mingw32 targets.
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001691 NumBytesForCalleeToPush = 4;
Gordon Henriksen18ace102008-01-05 16:56:59 +00001692 else
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001693 NumBytesForCalleeToPush = 0; // Callee pops nothing.
Gordon Henriksen18ace102008-01-05 16:56:59 +00001694
Gordon Henriksen6bbcc672008-01-03 16:47:34 +00001695 // Returns a flag for retval copy to use.
Bill Wendling22f8deb2007-11-13 00:44:25 +00001696 Chain = DAG.getCALLSEQ_END(Chain,
Chris Lattner5872a362008-01-17 07:00:52 +00001697 DAG.getIntPtrConstant(NumBytes),
1698 DAG.getIntPtrConstant(NumBytesForCalleeToPush),
Bill Wendling22f8deb2007-11-13 00:44:25 +00001699 InFlag);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001700 InFlag = Chain.getValue(1);
1701
1702 // Handle result values, copying them out of physregs into vregs that we
1703 // return.
Evan Cheng931a8f42008-01-29 19:34:22 +00001704 switch (SRetMethod) {
1705 default:
1706 return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
1707 case X86::InGPR64:
1708 return SDOperand(LowerCallResultToTwo64BitRegs(Chain, InFlag, Op.Val,
1709 X86::RAX, X86::RDX,
1710 MVT::i64, DAG), Op.ResNo);
1711 case X86::InSSE:
1712 return SDOperand(LowerCallResultToTwo64BitRegs(Chain, InFlag, Op.Val,
1713 X86::XMM0, X86::XMM1,
1714 MVT::f64, DAG), Op.ResNo);
1715 case X86::InX87:
1716 return SDOperand(LowerCallResultToTwoX87Regs(Chain, InFlag, Op.Val, DAG),
1717 Op.ResNo);
1718 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001719}
1720
1721
1722//===----------------------------------------------------------------------===//
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00001723// Fast Calling Convention (tail call) implementation
1724//===----------------------------------------------------------------------===//
1725
 1726// Like stdcall, the callee cleans up the arguments, except that ECX is
1727// reserved for storing the tail called function address. Only 2 registers are
1728// free for argument passing (inreg). Tail call optimization is performed
1729// provided:
1730// * tailcallopt is enabled
1731// * caller/callee are fastcc
1732// * elf/pic is disabled OR
1733// * elf/pic enabled + callee is in module + callee has
1734// visibility protected or hidden
Arnold Schwaighofer373e8652007-10-12 21:30:57 +00001735// To keep the stack aligned according to the platform ABI, the function
 1736// GetAlignedArgumentStackSize ensures that the argument delta is always a multiple
1737// of stack alignment. (Dynamic linkers need this - darwin's dyld for example)
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00001738// If a tail called function (callee) has more arguments than the caller, the
1739// caller needs to make sure that there is room to move the RETADDR to. This is
Arnold Schwaighofer373e8652007-10-12 21:30:57 +00001740// achieved by reserving an area the size of the argument delta right after the
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00001741// original RETADDR, but before the saved frame pointer or the spilled registers
1742// e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
1743// stack layout:
1744// arg1
1745// arg2
1746// RETADDR
1747// [ new RETADDR
1748// move area ]
1749// (possible EBP)
1750// ESI
1751// EDI
1752// local1 ..
1753
 1754/// GetAlignedArgumentStackSize - Round up the argument stack size so that it
 1755/// stays aligned, e.g. to 16n + 12 for a 16 byte alignment requirement.
1756unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
1757 SelectionDAG& DAG) {
1758 if (PerformTailCallOpt) {
1759 MachineFunction &MF = DAG.getMachineFunction();
1760 const TargetMachine &TM = MF.getTarget();
1761 const TargetFrameInfo &TFI = *TM.getFrameInfo();
1762 unsigned StackAlignment = TFI.getStackAlignment();
1763 uint64_t AlignMask = StackAlignment - 1;
1764 int64_t Offset = StackSize;
1765 unsigned SlotSize = Subtarget->is64Bit() ? 8 : 4;
1766 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
1767 // Number smaller than 12 so just add the difference.
1768 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
1769 } else {
1770 // Mask out lower bits, add stackalignment once plus the 12 bytes.
1771 Offset = ((~AlignMask) & Offset) + StackAlignment +
1772 (StackAlignment-SlotSize);
1773 }
1774 StackSize = Offset;
1775 }
1776 return StackSize;
1777}
1778
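// Editor's note: two worked examples, not from the original source, of the
// rounding performed by GetAlignedArgumentStackSize above, assuming a 32-bit
// target with StackAlignment == 16 and SlotSize == 4 (target residue 12).
// StackSize == 20: 20 & 15 == 4 <= 12, so the result is 20 + (12 - 4) == 28,
// i.e. 16*1 + 12. StackSize == 30: 30 & 15 == 14 > 12, so the result is
// (30 & ~15) + 16 + 12 == 44, i.e. 16*2 + 12. Either way the argument area
// plus the 4-byte return address ends on a 16-byte boundary.
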
 1779/// IsEligibleForTailCallOptimization - Check to see whether the next instruction
Evan Chenge7a87392007-11-02 01:26:22 +00001780/// following the call is a return. A function is eligible if caller/callee
1781/// calling conventions match, currently only fastcc supports tail calls, and
 1782/// the function CALL is immediately followed by a RET.
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00001783bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call,
1784 SDOperand Ret,
1785 SelectionDAG& DAG) const {
Evan Chenge7a87392007-11-02 01:26:22 +00001786 if (!PerformTailCallOpt)
1787 return false;
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00001788
 1789 // Check whether the CALL node immediately precedes the RET node and whether the
1790 // return uses the result of the node or is a void return.
Evan Chenge7a87392007-11-02 01:26:22 +00001791 unsigned NumOps = Ret.getNumOperands();
1792 if ((NumOps == 1 &&
1793 (Ret.getOperand(0) == SDOperand(Call.Val,1) ||
1794 Ret.getOperand(0) == SDOperand(Call.Val,0))) ||
Evan Cheng26c0e982007-11-02 17:45:40 +00001795 (NumOps > 1 &&
Evan Chenge7a87392007-11-02 01:26:22 +00001796 Ret.getOperand(0) == SDOperand(Call.Val,Call.Val->getNumValues()-1) &&
1797 Ret.getOperand(1) == SDOperand(Call.Val,0))) {
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00001798 MachineFunction &MF = DAG.getMachineFunction();
1799 unsigned CallerCC = MF.getFunction()->getCallingConv();
1800 unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue();
1801 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
1802 SDOperand Callee = Call.getOperand(4);
1803 // On elf/pic %ebx needs to be livein.
Evan Chenge7a87392007-11-02 01:26:22 +00001804 if (getTargetMachine().getRelocationModel() != Reloc::PIC_ ||
1805 !Subtarget->isPICStyleGOT())
1806 return true;
1807
1808 // Can only do local tail calls with PIC.
Gordon Henriksen18ace102008-01-05 16:56:59 +00001809 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1810 return G->getGlobal()->hasHiddenVisibility()
1811 || G->getGlobal()->hasProtectedVisibility();
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00001812 }
1813 }
Evan Chenge7a87392007-11-02 01:26:22 +00001814
1815 return false;
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00001816}
1817
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001818//===----------------------------------------------------------------------===//
1819// Other Lowering Hooks
1820//===----------------------------------------------------------------------===//
1821
1822
1823SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
Anton Korobeynikove844e472007-08-15 17:12:32 +00001824 MachineFunction &MF = DAG.getMachineFunction();
1825 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
1826 int ReturnAddrIndex = FuncInfo->getRAIndex();
1827
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001828 if (ReturnAddrIndex == 0) {
1829 // Set up a frame object for the return address.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001830 if (Subtarget->is64Bit())
1831 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
1832 else
1833 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
Anton Korobeynikove844e472007-08-15 17:12:32 +00001834
1835 FuncInfo->setRAIndex(ReturnAddrIndex);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001836 }
1837
1838 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
1839}
1840
1841
1842
 1843/// translateX86CC - Do a one-to-one translation of an ISD::CondCode to the X86
 1844/// specific condition code. It returns false if it cannot do a direct
1845/// translation. X86CC is the translated CondCode. LHS/RHS are modified as
1846/// needed.
1847static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
1848 unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
1849 SelectionDAG &DAG) {
1850 X86CC = X86::COND_INVALID;
1851 if (!isFP) {
1852 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
1853 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
1854 // X > -1 -> X == 0, jump !sign.
1855 RHS = DAG.getConstant(0, RHS.getValueType());
1856 X86CC = X86::COND_NS;
1857 return true;
1858 } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
1859 // X < 0 -> X == 0, jump on sign.
1860 X86CC = X86::COND_S;
1861 return true;
Dan Gohman37b34262007-09-17 14:49:27 +00001862 } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) {
1863 // X < 1 -> X <= 0
1864 RHS = DAG.getConstant(0, RHS.getValueType());
1865 X86CC = X86::COND_LE;
1866 return true;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001867 }
1868 }
1869
1870 switch (SetCCOpcode) {
1871 default: break;
1872 case ISD::SETEQ: X86CC = X86::COND_E; break;
1873 case ISD::SETGT: X86CC = X86::COND_G; break;
1874 case ISD::SETGE: X86CC = X86::COND_GE; break;
1875 case ISD::SETLT: X86CC = X86::COND_L; break;
1876 case ISD::SETLE: X86CC = X86::COND_LE; break;
1877 case ISD::SETNE: X86CC = X86::COND_NE; break;
1878 case ISD::SETULT: X86CC = X86::COND_B; break;
1879 case ISD::SETUGT: X86CC = X86::COND_A; break;
1880 case ISD::SETULE: X86CC = X86::COND_BE; break;
1881 case ISD::SETUGE: X86CC = X86::COND_AE; break;
1882 }
1883 } else {
1884 // On a floating point condition, the flags are set as follows:
1885 // ZF PF CF op
1886 // 0 | 0 | 0 | X > Y
1887 // 0 | 0 | 1 | X < Y
1888 // 1 | 0 | 0 | X == Y
1889 // 1 | 1 | 1 | unordered
1890 bool Flip = false;
1891 switch (SetCCOpcode) {
1892 default: break;
1893 case ISD::SETUEQ:
1894 case ISD::SETEQ: X86CC = X86::COND_E; break;
1895 case ISD::SETOLT: Flip = true; // Fallthrough
1896 case ISD::SETOGT:
1897 case ISD::SETGT: X86CC = X86::COND_A; break;
1898 case ISD::SETOLE: Flip = true; // Fallthrough
1899 case ISD::SETOGE:
1900 case ISD::SETGE: X86CC = X86::COND_AE; break;
1901 case ISD::SETUGT: Flip = true; // Fallthrough
1902 case ISD::SETULT:
1903 case ISD::SETLT: X86CC = X86::COND_B; break;
1904 case ISD::SETUGE: Flip = true; // Fallthrough
1905 case ISD::SETULE:
1906 case ISD::SETLE: X86CC = X86::COND_BE; break;
1907 case ISD::SETONE:
1908 case ISD::SETNE: X86CC = X86::COND_NE; break;
1909 case ISD::SETUO: X86CC = X86::COND_P; break;
1910 case ISD::SETO: X86CC = X86::COND_NP; break;
1911 }
1912 if (Flip)
1913 std::swap(LHS, RHS);
1914 }
1915
1916 return X86CC != X86::COND_INVALID;
1917}
1918
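// Editor's note: illustrative translations, not from the original source,
// performed by translateX86CC above. An integer SETGT maps to X86::COND_G,
// but the special case "X > -1" is rewritten to compare X against 0 and use
// X86::COND_NS (jump if not sign). For floating point, SETOGT maps to the
// unsigned-flavored X86::COND_A, and SETOLT reaches the same X86::COND_A
// after the operands are swapped, per the ZF/PF/CF table in the function.
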
 1919/// hasFPCMov - Return true if there is a floating point cmov for the specific
 1920/// X86 condition code. The current x86 ISA includes the following FP cmov
 1921/// instructions: fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
1922static bool hasFPCMov(unsigned X86CC) {
1923 switch (X86CC) {
1924 default:
1925 return false;
1926 case X86::COND_B:
1927 case X86::COND_BE:
1928 case X86::COND_E:
1929 case X86::COND_P:
1930 case X86::COND_A:
1931 case X86::COND_AE:
1932 case X86::COND_NE:
1933 case X86::COND_NP:
1934 return true;
1935 }
1936}
1937
1938/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return
 1939/// true if Op is undef or if its value falls within the specified range [Low, Hi).
1940static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
1941 if (Op.getOpcode() == ISD::UNDEF)
1942 return true;
1943
1944 unsigned Val = cast<ConstantSDNode>(Op)->getValue();
1945 return (Val >= Low && Val < Hi);
1946}
1947
1948/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return
 1949/// true if Op is undef or if its value equals the specified value.
1950static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
1951 if (Op.getOpcode() == ISD::UNDEF)
1952 return true;
1953 return cast<ConstantSDNode>(Op)->getValue() == Val;
1954}
1955
1956/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
1957/// specifies a shuffle of elements that is suitable for input to PSHUFD.
1958bool X86::isPSHUFDMask(SDNode *N) {
1959 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1960
Dan Gohman7dc19012007-08-02 21:17:01 +00001961 if (N->getNumOperands() != 2 && N->getNumOperands() != 4)
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001962 return false;
1963
1964 // Check if the value doesn't reference the second vector.
1965 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
1966 SDOperand Arg = N->getOperand(i);
1967 if (Arg.getOpcode() == ISD::UNDEF) continue;
1968 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
Dan Gohman7dc19012007-08-02 21:17:01 +00001969 if (cast<ConstantSDNode>(Arg)->getValue() >= e)
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001970 return false;
1971 }
1972
1973 return true;
1974}
1975
1976/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
1977/// specifies a shuffle of elements that is suitable for input to PSHUFHW.
1978bool X86::isPSHUFHWMask(SDNode *N) {
1979 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1980
1981 if (N->getNumOperands() != 8)
1982 return false;
1983
1984 // Lower quadword copied in order.
1985 for (unsigned i = 0; i != 4; ++i) {
1986 SDOperand Arg = N->getOperand(i);
1987 if (Arg.getOpcode() == ISD::UNDEF) continue;
1988 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
1989 if (cast<ConstantSDNode>(Arg)->getValue() != i)
1990 return false;
1991 }
1992
1993 // Upper quadword shuffled.
1994 for (unsigned i = 4; i != 8; ++i) {
1995 SDOperand Arg = N->getOperand(i);
1996 if (Arg.getOpcode() == ISD::UNDEF) continue;
1997 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
1998 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
1999 if (Val < 4 || Val > 7)
2000 return false;
2001 }
2002
2003 return true;
2004}
2005
2006/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
2007/// specifies a shuffle of elements that is suitable for input to PSHUFLW.
2008bool X86::isPSHUFLWMask(SDNode *N) {
2009 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2010
2011 if (N->getNumOperands() != 8)
2012 return false;
2013
2014 // Upper quadword copied in order.
2015 for (unsigned i = 4; i != 8; ++i)
2016 if (!isUndefOrEqual(N->getOperand(i), i))
2017 return false;
2018
2019 // Lower quadword shuffled.
2020 for (unsigned i = 0; i != 4; ++i)
2021 if (!isUndefOrInRange(N->getOperand(i), 0, 4))
2022 return false;
2023
2024 return true;
2025}
2026
2027/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
2028/// specifies a shuffle of elements that is suitable for input to SHUFP*.
2029static bool isSHUFPMask(const SDOperand *Elems, unsigned NumElems) {
2030 if (NumElems != 2 && NumElems != 4) return false;
2031
2032 unsigned Half = NumElems / 2;
2033 for (unsigned i = 0; i < Half; ++i)
2034 if (!isUndefOrInRange(Elems[i], 0, NumElems))
2035 return false;
2036 for (unsigned i = Half; i < NumElems; ++i)
2037 if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2))
2038 return false;
2039
2040 return true;
2041}
2042
2043bool X86::isSHUFPMask(SDNode *N) {
2044 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2045 return ::isSHUFPMask(N->op_begin(), N->getNumOperands());
2046}
2047
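// Editor's note: an illustrative mask, not from the original source, accepted
// by isSHUFPMask above. For a 4-element shuffle, <1, 3, 5, 7> qualifies: the
// first half selects from the first vector (indices in [0, 4)) and the second
// half from the second vector (indices in [4, 8)). A mask such as <4, 5, 0, 1>
// would instead satisfy isCommutedSHUFP below.
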
2048/// isCommutedSHUFP - Returns true if the shuffle mask is exactly
 2049/// the reverse of what x86 shuffles want. x86 shuffles require the lower
2050/// half elements to come from vector 1 (which would equal the dest.) and
2051/// the upper half to come from vector 2.
2052static bool isCommutedSHUFP(const SDOperand *Ops, unsigned NumOps) {
2053 if (NumOps != 2 && NumOps != 4) return false;
2054
2055 unsigned Half = NumOps / 2;
2056 for (unsigned i = 0; i < Half; ++i)
2057 if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2))
2058 return false;
2059 for (unsigned i = Half; i < NumOps; ++i)
2060 if (!isUndefOrInRange(Ops[i], 0, NumOps))
2061 return false;
2062 return true;
2063}
2064
2065static bool isCommutedSHUFP(SDNode *N) {
2066 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2067 return isCommutedSHUFP(N->op_begin(), N->getNumOperands());
2068}
2069
2070/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
2071/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
2072bool X86::isMOVHLPSMask(SDNode *N) {
2073 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2074
2075 if (N->getNumOperands() != 4)
2076 return false;
2077
2078 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
2079 return isUndefOrEqual(N->getOperand(0), 6) &&
2080 isUndefOrEqual(N->getOperand(1), 7) &&
2081 isUndefOrEqual(N->getOperand(2), 2) &&
2082 isUndefOrEqual(N->getOperand(3), 3);
2083}
2084
2085/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
2086/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
2087/// <2, 3, 2, 3>
2088bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) {
2089 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2090
2091 if (N->getNumOperands() != 4)
2092 return false;
2093
2094 // Expect bit0 == 2, bit1 == 3, bit2 == 2, bit3 == 3
2095 return isUndefOrEqual(N->getOperand(0), 2) &&
2096 isUndefOrEqual(N->getOperand(1), 3) &&
2097 isUndefOrEqual(N->getOperand(2), 2) &&
2098 isUndefOrEqual(N->getOperand(3), 3);
2099}
2100
2101/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
2102/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
2103bool X86::isMOVLPMask(SDNode *N) {
2104 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2105
2106 unsigned NumElems = N->getNumOperands();
2107 if (NumElems != 2 && NumElems != 4)
2108 return false;
2109
2110 for (unsigned i = 0; i < NumElems/2; ++i)
2111 if (!isUndefOrEqual(N->getOperand(i), i + NumElems))
2112 return false;
2113
2114 for (unsigned i = NumElems/2; i < NumElems; ++i)
2115 if (!isUndefOrEqual(N->getOperand(i), i))
2116 return false;
2117
2118 return true;
2119}
2120
2121/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
2122/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
2123/// and MOVLHPS.
2124bool X86::isMOVHPMask(SDNode *N) {
2125 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2126
2127 unsigned NumElems = N->getNumOperands();
2128 if (NumElems != 2 && NumElems != 4)
2129 return false;
2130
2131 for (unsigned i = 0; i < NumElems/2; ++i)
2132 if (!isUndefOrEqual(N->getOperand(i), i))
2133 return false;
2134
2135 for (unsigned i = 0; i < NumElems/2; ++i) {
2136 SDOperand Arg = N->getOperand(i + NumElems/2);
2137 if (!isUndefOrEqual(Arg, i + NumElems))
2138 return false;
2139 }
2140
2141 return true;
2142}
2143
2144/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
2145/// specifies a shuffle of elements that is suitable for input to UNPCKL.
2146bool static isUNPCKLMask(const SDOperand *Elts, unsigned NumElts,
2147 bool V2IsSplat = false) {
2148 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
2149 return false;
2150
2151 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) {
2152 SDOperand BitI = Elts[i];
2153 SDOperand BitI1 = Elts[i+1];
2154 if (!isUndefOrEqual(BitI, j))
2155 return false;
2156 if (V2IsSplat) {
2157 if (isUndefOrEqual(BitI1, NumElts))
2158 return false;
2159 } else {
2160 if (!isUndefOrEqual(BitI1, j + NumElts))
2161 return false;
2162 }
2163 }
2164
2165 return true;
2166}
2167
2168bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) {
2169 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2170 return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat);
2171}
2172
2173/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
2174/// specifies a shuffle of elements that is suitable for input to UNPCKH.
2175bool static isUNPCKHMask(const SDOperand *Elts, unsigned NumElts,
2176 bool V2IsSplat = false) {
2177 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
2178 return false;
2179
2180 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) {
2181 SDOperand BitI = Elts[i];
2182 SDOperand BitI1 = Elts[i+1];
2183 if (!isUndefOrEqual(BitI, j + NumElts/2))
2184 return false;
2185 if (V2IsSplat) {
2186 if (isUndefOrEqual(BitI1, NumElts))
2187 return false;
2188 } else {
2189 if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts))
2190 return false;
2191 }
2192 }
2193
2194 return true;
2195}
2196
2197bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) {
2198 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2199 return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat);
2200}
2201
2202/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
2203/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
2204/// <0, 0, 1, 1>
2205bool X86::isUNPCKL_v_undef_Mask(SDNode *N) {
2206 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2207
2208 unsigned NumElems = N->getNumOperands();
2209 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
2210 return false;
2211
2212 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
2213 SDOperand BitI = N->getOperand(i);
2214 SDOperand BitI1 = N->getOperand(i+1);
2215
2216 if (!isUndefOrEqual(BitI, j))
2217 return false;
2218 if (!isUndefOrEqual(BitI1, j))
2219 return false;
2220 }
2221
2222 return true;
2223}
2224
2225/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
2226/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
2227/// <2, 2, 3, 3>
2228bool X86::isUNPCKH_v_undef_Mask(SDNode *N) {
2229 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2230
2231 unsigned NumElems = N->getNumOperands();
2232 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
2233 return false;
2234
2235 for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) {
2236 SDOperand BitI = N->getOperand(i);
2237 SDOperand BitI1 = N->getOperand(i + 1);
2238
2239 if (!isUndefOrEqual(BitI, j))
2240 return false;
2241 if (!isUndefOrEqual(BitI1, j))
2242 return false;
2243 }
2244
2245 return true;
2246}
2247
2248/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
2249/// specifies a shuffle of elements that is suitable for input to MOVSS,
2250/// MOVSD, and MOVD, i.e. setting the lowest element.
2251static bool isMOVLMask(const SDOperand *Elts, unsigned NumElts) {
Evan Cheng62cdc642007-12-06 22:14:22 +00002252 if (NumElts != 2 && NumElts != 4)
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002253 return false;
2254
2255 if (!isUndefOrEqual(Elts[0], NumElts))
2256 return false;
2257
2258 for (unsigned i = 1; i < NumElts; ++i) {
2259 if (!isUndefOrEqual(Elts[i], i))
2260 return false;
2261 }
2262
2263 return true;
2264}
2265
2266bool X86::isMOVLMask(SDNode *N) {
2267 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2268 return ::isMOVLMask(N->op_begin(), N->getNumOperands());
2269}
2270
 2271/// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse
 2272/// of what x86 movss wants: it requires the lowest element to be the lowest
 2273/// element of vector 2 and the other elements to come from vector 1 in order.
2274static bool isCommutedMOVL(const SDOperand *Ops, unsigned NumOps,
2275 bool V2IsSplat = false,
2276 bool V2IsUndef = false) {
2277 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
2278 return false;
2279
2280 if (!isUndefOrEqual(Ops[0], 0))
2281 return false;
2282
2283 for (unsigned i = 1; i < NumOps; ++i) {
2284 SDOperand Arg = Ops[i];
2285 if (!(isUndefOrEqual(Arg, i+NumOps) ||
2286 (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) ||
2287 (V2IsSplat && isUndefOrEqual(Arg, NumOps))))
2288 return false;
2289 }
2290
2291 return true;
2292}
2293
2294static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
2295 bool V2IsUndef = false) {
2296 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2297 return isCommutedMOVL(N->op_begin(), N->getNumOperands(),
2298 V2IsSplat, V2IsUndef);
2299}
2300
2301/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2302/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
2303bool X86::isMOVSHDUPMask(SDNode *N) {
2304 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2305
2306 if (N->getNumOperands() != 4)
2307 return false;
2308
2309 // Expect 1, 1, 3, 3
2310 for (unsigned i = 0; i < 2; ++i) {
2311 SDOperand Arg = N->getOperand(i);
2312 if (Arg.getOpcode() == ISD::UNDEF) continue;
2313 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2314 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2315 if (Val != 1) return false;
2316 }
2317
2318 bool HasHi = false;
2319 for (unsigned i = 2; i < 4; ++i) {
2320 SDOperand Arg = N->getOperand(i);
2321 if (Arg.getOpcode() == ISD::UNDEF) continue;
2322 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2323 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2324 if (Val != 3) return false;
2325 HasHi = true;
2326 }
2327
2328 // Don't use movshdup if it can be done with a shufps.
2329 return HasHi;
2330}
2331
2332/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2333/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
2334bool X86::isMOVSLDUPMask(SDNode *N) {
2335 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2336
2337 if (N->getNumOperands() != 4)
2338 return false;
2339
2340 // Expect 0, 0, 2, 2
2341 for (unsigned i = 0; i < 2; ++i) {
2342 SDOperand Arg = N->getOperand(i);
2343 if (Arg.getOpcode() == ISD::UNDEF) continue;
2344 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2345 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2346 if (Val != 0) return false;
2347 }
2348
2349 bool HasHi = false;
2350 for (unsigned i = 2; i < 4; ++i) {
2351 SDOperand Arg = N->getOperand(i);
2352 if (Arg.getOpcode() == ISD::UNDEF) continue;
2353 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2354 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2355 if (Val != 2) return false;
2356 HasHi = true;
2357 }
2358
2359 // Don't use movsldup if it can be done with a shufps.
2360 return HasHi;
2361}
2362
2363/// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand
2364/// specifies an identity operation on the LHS or RHS.
2365static bool isIdentityMask(SDNode *N, bool RHS = false) {
2366 unsigned NumElems = N->getNumOperands();
2367 for (unsigned i = 0; i < NumElems; ++i)
2368 if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0)))
2369 return false;
2370 return true;
2371}
2372
2373/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
2374/// a splat of a single element.
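/// For example (illustrative), <2, 2, 2, 2> is a splat mask, while
/// <6, 6, 6, 6> is rejected because the splatted element would come from the
/// second vector.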
2375static bool isSplatMask(SDNode *N) {
2376 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2377
2378 // This is a splat operation if each element of the permute is the same, and
2379 // if the value doesn't reference the second vector.
2380 unsigned NumElems = N->getNumOperands();
2381 SDOperand ElementBase;
2382 unsigned i = 0;
2383 for (; i != NumElems; ++i) {
2384 SDOperand Elt = N->getOperand(i);
2385 if (isa<ConstantSDNode>(Elt)) {
2386 ElementBase = Elt;
2387 break;
2388 }
2389 }
2390
2391 if (!ElementBase.Val)
2392 return false;
2393
2394 for (; i != NumElems; ++i) {
2395 SDOperand Arg = N->getOperand(i);
2396 if (Arg.getOpcode() == ISD::UNDEF) continue;
2397 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2398 if (Arg != ElementBase) return false;
2399 }
2400
2401 // Make sure it is a splat of the first vector operand.
2402 return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems;
2403}
2404
2405/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
2406/// a splat of a single element and it's a 2 or 4 element mask.
2407bool X86::isSplatMask(SDNode *N) {
2408 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2409
2410 // We can only splat 64-bit and 32-bit quantities with a single instruction.
2411 if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
2412 return false;
2413 return ::isSplatMask(N);
2414}
2415
2416/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
2417/// specifies a splat of element zero.
2418bool X86::isSplatLoMask(SDNode *N) {
2419 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2420
2421 for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
2422 if (!isUndefOrEqual(N->getOperand(i), 0))
2423 return false;
2424 return true;
2425}
2426
2427/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
2428/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
2429/// instructions.
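/// For example (worked by hand from the loop below), the 4-element mask
/// <3, 2, 1, 0> encodes as (3<<0)|(2<<2)|(1<<4)|(0<<6) = 0x1B; the i-th
/// 2-bit field of the immediate holds the source index of result element i.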
2430unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
2431 unsigned NumOperands = N->getNumOperands();
2432 unsigned Shift = (NumOperands == 4) ? 2 : 1;
2433 unsigned Mask = 0;
2434 for (unsigned i = 0; i < NumOperands; ++i) {
2435 unsigned Val = 0;
2436 SDOperand Arg = N->getOperand(NumOperands-i-1);
2437 if (Arg.getOpcode() != ISD::UNDEF)
2438 Val = cast<ConstantSDNode>(Arg)->getValue();
2439 if (Val >= NumOperands) Val -= NumOperands;
2440 Mask |= Val;
2441 if (i != NumOperands - 1)
2442 Mask <<= Shift;
2443 }
2444
2445 return Mask;
2446}
2447
2448/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
2449/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
2450/// instructions.
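/// For example (illustrative), the v8i16 mask <0, 1, 2, 3, 4, 5, 6, 7>
/// (identity on the high words) encodes as 0xE4: two bits per destination
/// word, counted relative to the start of the high quadword.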
2451unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
2452 unsigned Mask = 0;
2453 // 8 nodes, but we only care about the last 4.
2454 for (unsigned i = 7; i >= 4; --i) {
2455 unsigned Val = 0;
2456 SDOperand Arg = N->getOperand(i);
2457 if (Arg.getOpcode() != ISD::UNDEF)
2458 Val = cast<ConstantSDNode>(Arg)->getValue();
2459 Mask |= (Val - 4);
2460 if (i != 4)
2461 Mask <<= 2;
2462 }
2463
2464 return Mask;
2465}
2466
2467/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
2468/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
2469/// instructions.
2470unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
2471 unsigned Mask = 0;
2472 // 8 nodes, but we only care about the first 4.
2473 for (int i = 3; i >= 0; --i) {
2474 unsigned Val = 0;
2475 SDOperand Arg = N->getOperand(i);
2476 if (Arg.getOpcode() != ISD::UNDEF)
2477 Val = cast<ConstantSDNode>(Arg)->getValue();
2478 Mask |= Val;
2479 if (i != 0)
2480 Mask <<= 2;
2481 }
2482
2483 return Mask;
2484}
2485
2486/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
2487/// specifies a 8 element shuffle that can be broken into a pair of
2488/// PSHUFHW and PSHUFLW.
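/// For example (illustrative), <2, 0, 3, 1, 5, 7, 4, 6> qualifies: the first
/// four indices stay within the low quadword and the last four stay within
/// the high quadword.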
2489static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
2490 assert(N->getOpcode() == ISD::BUILD_VECTOR);
2491
2492 if (N->getNumOperands() != 8)
2493 return false;
2494
2495 // Lower quadword shuffled.
2496 for (unsigned i = 0; i != 4; ++i) {
2497 SDOperand Arg = N->getOperand(i);
2498 if (Arg.getOpcode() == ISD::UNDEF) continue;
2499 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2500 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
Evan Cheng75184a92007-12-11 01:46:18 +00002501 if (Val >= 4)
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002502 return false;
2503 }
2504
2505 // Upper quadword shuffled.
2506 for (unsigned i = 4; i != 8; ++i) {
2507 SDOperand Arg = N->getOperand(i);
2508 if (Arg.getOpcode() == ISD::UNDEF) continue;
2509 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2510 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2511 if (Val < 4 || Val > 7)
2512 return false;
2513 }
2514
2515 return true;
2516}
2517
Chris Lattnere6aa3862007-11-25 00:24:49 +00002518/// CommuteVectorShuffle - Swap vector_shuffle operands as well as
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002519/// values in their permute mask.
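/// For example (illustrative), for a 4-element shuffle the mask <0, 1, 6, 7>
/// becomes <4, 5, 2, 3>: indices below NumElems move to the second operand
/// and indices at or above NumElems move to the first, matching the swap.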
2520static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1,
2521 SDOperand &V2, SDOperand &Mask,
2522 SelectionDAG &DAG) {
2523 MVT::ValueType VT = Op.getValueType();
2524 MVT::ValueType MaskVT = Mask.getValueType();
2525 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
2526 unsigned NumElems = Mask.getNumOperands();
2527 SmallVector<SDOperand, 8> MaskVec;
2528
2529 for (unsigned i = 0; i != NumElems; ++i) {
2530 SDOperand Arg = Mask.getOperand(i);
2531 if (Arg.getOpcode() == ISD::UNDEF) {
2532 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
2533 continue;
2534 }
2535 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2536 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2537 if (Val < NumElems)
2538 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
2539 else
2540 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
2541 }
2542
2543 std::swap(V1, V2);
Evan Chengfca29242007-12-07 08:07:39 +00002544 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002545 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
2546}
2547
Evan Chenga6769df2007-12-07 21:30:01 +00002548/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
2549/// the two vector operands have swapped position.
Evan Chengfca29242007-12-07 08:07:39 +00002550static
2551SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) {
2552 MVT::ValueType MaskVT = Mask.getValueType();
2553 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
2554 unsigned NumElems = Mask.getNumOperands();
2555 SmallVector<SDOperand, 8> MaskVec;
2556 for (unsigned i = 0; i != NumElems; ++i) {
2557 SDOperand Arg = Mask.getOperand(i);
2558 if (Arg.getOpcode() == ISD::UNDEF) {
2559 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
2560 continue;
2561 }
2562 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2563 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2564 if (Val < NumElems)
2565 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
2566 else
2567 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
2568 }
2569 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems);
2570}
2571
2572
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002573/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
2574/// match movhlps. The lower half elements should come from the upper half of
2575/// V1 (and in order), and the upper half elements should come from the upper
2576/// half of V2 (and in order).
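/// For 4 elements this is exactly the mask <2, 3, 6, 7> (modulo undefs).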
2577static bool ShouldXformToMOVHLPS(SDNode *Mask) {
2578 unsigned NumElems = Mask->getNumOperands();
2579 if (NumElems != 4)
2580 return false;
2581 for (unsigned i = 0, e = 2; i != e; ++i)
2582 if (!isUndefOrEqual(Mask->getOperand(i), i+2))
2583 return false;
2584 for (unsigned i = 2; i != 4; ++i)
2585 if (!isUndefOrEqual(Mask->getOperand(i), i+4))
2586 return false;
2587 return true;
2588}
2589
2590/// isScalarLoadToVector - Returns true if the node is a scalar load that
2591/// is promoted to a vector.
2592static inline bool isScalarLoadToVector(SDNode *N) {
2593 if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) {
2594 N = N->getOperand(0).Val;
2595 return ISD::isNON_EXTLoad(N);
2596 }
2597 return false;
2598}
2599
2600/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
2601/// match movlp{s|d}. The lower half elements should come from the lower half of
2602/// V1 (and in order), and the upper half elements should come from the upper
2603/// half of V2 (and in order). And since V1 will become the source of the
2604/// MOVLP, it must be either a vector load or a scalar load to vector.
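/// For 4 elements the mask must be <0, 1, 6, 7> (modulo undefs); for 2
/// elements it must be <0, 3>.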
2605static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) {
2606 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
2607 return false;
2608 // If V2 is a vector load, don't do this transformation. We will try to
2609 // fold the load into a shufps op instead.
2610 if (ISD::isNON_EXTLoad(V2))
2611 return false;
2612
2613 unsigned NumElems = Mask->getNumOperands();
2614 if (NumElems != 2 && NumElems != 4)
2615 return false;
2616 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
2617 if (!isUndefOrEqual(Mask->getOperand(i), i))
2618 return false;
2619 for (unsigned i = NumElems/2; i != NumElems; ++i)
2620 if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
2621 return false;
2622 return true;
2623}
2624
2625/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
2626/// all the same.
2627static bool isSplatVector(SDNode *N) {
2628 if (N->getOpcode() != ISD::BUILD_VECTOR)
2629 return false;
2630
2631 SDOperand SplatValue = N->getOperand(0);
2632 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
2633 if (N->getOperand(i) != SplatValue)
2634 return false;
2635 return true;
2636}
2637
2638/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
2639/// to an undef.
2640static bool isUndefShuffle(SDNode *N) {
2641 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
2642 return false;
2643
2644 SDOperand V1 = N->getOperand(0);
2645 SDOperand V2 = N->getOperand(1);
2646 SDOperand Mask = N->getOperand(2);
2647 unsigned NumElems = Mask.getNumOperands();
2648 for (unsigned i = 0; i != NumElems; ++i) {
2649 SDOperand Arg = Mask.getOperand(i);
2650 if (Arg.getOpcode() != ISD::UNDEF) {
2651 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2652 if (Val < NumElems && V1.getOpcode() != ISD::UNDEF)
2653 return false;
2654 else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF)
2655 return false;
2656 }
2657 }
2658 return true;
2659}
2660
2661/// isZeroNode - Returns true if Elt is a constant zero or a floating point
2662/// constant +0.0.
2663static inline bool isZeroNode(SDOperand Elt) {
2664 return ((isa<ConstantSDNode>(Elt) &&
2665 cast<ConstantSDNode>(Elt)->getValue() == 0) ||
2666 (isa<ConstantFPSDNode>(Elt) &&
Dale Johannesendf8a8312007-08-31 04:03:46 +00002667 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002668}
2669
2670/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
2671/// to a zero vector.
2672static bool isZeroShuffle(SDNode *N) {
2673 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
2674 return false;
2675
2676 SDOperand V1 = N->getOperand(0);
2677 SDOperand V2 = N->getOperand(1);
2678 SDOperand Mask = N->getOperand(2);
2679 unsigned NumElems = Mask.getNumOperands();
2680 for (unsigned i = 0; i != NumElems; ++i) {
2681 SDOperand Arg = Mask.getOperand(i);
Chris Lattnere6aa3862007-11-25 00:24:49 +00002682 if (Arg.getOpcode() == ISD::UNDEF)
2683 continue;
2684
2685 unsigned Idx = cast<ConstantSDNode>(Arg)->getValue();
2686 if (Idx < NumElems) {
2687 unsigned Opc = V1.Val->getOpcode();
2688 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.Val))
2689 continue;
2690 if (Opc != ISD::BUILD_VECTOR ||
2691 !isZeroNode(V1.Val->getOperand(Idx)))
2692 return false;
2693 } else if (Idx >= NumElems) {
2694 unsigned Opc = V2.Val->getOpcode();
2695 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.Val))
2696 continue;
2697 if (Opc != ISD::BUILD_VECTOR ||
2698 !isZeroNode(V2.Val->getOperand(Idx - NumElems)))
2699 return false;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002700 }
2701 }
2702 return true;
2703}
2704
2705/// getZeroVector - Returns a vector of specified type with all zero elements.
2706///
2707static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
2708 assert(MVT::isVector(VT) && "Expected a vector type");
Chris Lattnere6aa3862007-11-25 00:24:49 +00002709
2710 // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest
2711 // type. This ensures they get CSE'd.
2712 SDOperand Cst = DAG.getTargetConstant(0, MVT::i32);
2713 SDOperand Vec;
2714 if (MVT::getSizeInBits(VT) == 64) // MMX
2715 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
2716 else // SSE
2717 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
2718 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002719}
2720
Chris Lattnere6aa3862007-11-25 00:24:49 +00002721/// getOnesVector - Returns a vector of specified type with all bits set.
2722///
2723static SDOperand getOnesVector(MVT::ValueType VT, SelectionDAG &DAG) {
2724 assert(MVT::isVector(VT) && "Expected a vector type");
2725
2726 // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest
2727 // type. This ensures they get CSE'd.
2728 SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32);
2729 SDOperand Vec;
2730 if (MVT::getSizeInBits(VT) == 64) // MMX
2731 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
2732 else // SSE
2733 Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
2734 return DAG.getNode(ISD::BIT_CONVERT, VT, Vec);
2735}
2736
2737
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002738/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
2739/// that point to V2 point to its first element.
2740static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) {
2741 assert(Mask.getOpcode() == ISD::BUILD_VECTOR);
2742
2743 bool Changed = false;
2744 SmallVector<SDOperand, 8> MaskVec;
2745 unsigned NumElems = Mask.getNumOperands();
2746 for (unsigned i = 0; i != NumElems; ++i) {
2747 SDOperand Arg = Mask.getOperand(i);
2748 if (Arg.getOpcode() != ISD::UNDEF) {
2749 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2750 if (Val > NumElems) {
2751 Arg = DAG.getConstant(NumElems, Arg.getValueType());
2752 Changed = true;
2753 }
2754 }
2755 MaskVec.push_back(Arg);
2756 }
2757
2758 if (Changed)
2759 Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(),
2760 &MaskVec[0], MaskVec.size());
2761 return Mask;
2762}
2763
2764/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
2765/// operation of specified width.
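/// For example, for 4 elements this builds the mask <4, 1, 2, 3>: take the
/// low element from the second operand and the rest from the first.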
2766static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) {
2767 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2768 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
2769
2770 SmallVector<SDOperand, 8> MaskVec;
2771 MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
2772 for (unsigned i = 1; i != NumElems; ++i)
2773 MaskVec.push_back(DAG.getConstant(i, BaseVT));
2774 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
2775}
2776
2777/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
2778/// of specified width.
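/// For example, for 4 elements this builds the mask <0, 4, 1, 5>, which
/// interleaves the low halves of the two operands.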
2779static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
2780 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2781 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
2782 SmallVector<SDOperand, 8> MaskVec;
2783 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
2784 MaskVec.push_back(DAG.getConstant(i, BaseVT));
2785 MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
2786 }
2787 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
2788}
2789
2790/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
2791/// of specified width.
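/// For example, for 4 elements this builds the mask <2, 6, 3, 7>, which
/// interleaves the high halves of the two operands.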
2792static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
2793 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2794 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
2795 unsigned Half = NumElems/2;
2796 SmallVector<SDOperand, 8> MaskVec;
2797 for (unsigned i = 0; i != Half; ++i) {
2798 MaskVec.push_back(DAG.getConstant(i + Half, BaseVT));
2799 MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT));
2800 }
2801 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
2802}
2803
2804/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32.
2805///
2806static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) {
2807 SDOperand V1 = Op.getOperand(0);
2808 SDOperand Mask = Op.getOperand(2);
2809 MVT::ValueType VT = Op.getValueType();
2810 unsigned NumElems = Mask.getNumOperands();
2811 Mask = getUnpacklMask(NumElems, DAG);
2812 while (NumElems != 4) {
2813 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);
2814 NumElems >>= 1;
2815 }
2816 V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1);
2817
Chris Lattnere6aa3862007-11-25 00:24:49 +00002818 Mask = getZeroVector(MVT::v4i32, DAG);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002819 SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1,
2820 DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask);
2821 return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle);
2822}
2823
2824/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
Chris Lattnere6aa3862007-11-25 00:24:49 +00002825/// vector and a zero or undef vector. This produces a shuffle where the low
2826/// element of V2 is swizzled into the zero/undef vector, landing at element
2827/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002828static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT,
2829 unsigned NumElems, unsigned Idx,
2830 bool isZero, SelectionDAG &DAG) {
2831 SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT);
2832 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2833 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
Chris Lattnere6aa3862007-11-25 00:24:49 +00002834 SmallVector<SDOperand, 16> MaskVec;
2835 for (unsigned i = 0; i != NumElems; ++i)
2836 if (i == Idx) // If this is the insertion idx, put the low elt of V2 here.
2837 MaskVec.push_back(DAG.getConstant(NumElems, EVT));
2838 else
2839 MaskVec.push_back(DAG.getConstant(i, EVT));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002840 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
2841 &MaskVec[0], MaskVec.size());
2842 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
2843}
2844
2845/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
2846///
2847static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros,
2848 unsigned NumNonZero, unsigned NumZero,
2849 SelectionDAG &DAG, TargetLowering &TLI) {
2850 if (NumNonZero > 8)
2851 return SDOperand();
2852
2853 SDOperand V(0, 0);
2854 bool First = true;
2855 for (unsigned i = 0; i < 16; ++i) {
2856 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
2857 if (ThisIsNonZero && First) {
2858 if (NumZero)
2859 V = getZeroVector(MVT::v8i16, DAG);
2860 else
2861 V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
2862 First = false;
2863 }
2864
2865 if ((i & 1) != 0) {
2866 SDOperand ThisElt(0, 0), LastElt(0, 0);
2867 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
2868 if (LastIsNonZero) {
2869 LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1));
2870 }
2871 if (ThisIsNonZero) {
2872 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i));
2873 ThisElt = DAG.getNode(ISD::SHL, MVT::i16,
2874 ThisElt, DAG.getConstant(8, MVT::i8));
2875 if (LastIsNonZero)
2876 ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt);
2877 } else
2878 ThisElt = LastElt;
2879
2880 if (ThisElt.Val)
2881 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt,
Chris Lattner5872a362008-01-17 07:00:52 +00002882 DAG.getIntPtrConstant(i/2));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002883 }
2884 }
2885
2886 return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V);
2887}
2888
2889/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
2890///
2891static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros,
2892 unsigned NumNonZero, unsigned NumZero,
2893 SelectionDAG &DAG, TargetLowering &TLI) {
2894 if (NumNonZero > 4)
2895 return SDOperand();
2896
2897 SDOperand V(0, 0);
2898 bool First = true;
2899 for (unsigned i = 0; i < 8; ++i) {
2900 bool isNonZero = (NonZeros & (1 << i)) != 0;
2901 if (isNonZero) {
2902 if (First) {
2903 if (NumZero)
2904 V = getZeroVector(MVT::v8i16, DAG);
2905 else
2906 V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
2907 First = false;
2908 }
2909 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i),
Chris Lattner5872a362008-01-17 07:00:52 +00002910 DAG.getIntPtrConstant(i));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002911 }
2912 }
2913
2914 return V;
2915}
2916
2917SDOperand
2918X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
Chris Lattnere6aa3862007-11-25 00:24:49 +00002919 // All zero's are handled with pxor, all one's are handled with pcmpeqd.
2920 if (ISD::isBuildVectorAllZeros(Op.Val) || ISD::isBuildVectorAllOnes(Op.Val)) {
2921 // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to
2922 // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are
2923 // eliminated on x86-32 hosts.
2924 if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32)
2925 return Op;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002926
Chris Lattnere6aa3862007-11-25 00:24:49 +00002927 if (ISD::isBuildVectorAllOnes(Op.Val))
2928 return getOnesVector(Op.getValueType(), DAG);
2929 return getZeroVector(Op.getValueType(), DAG);
2930 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002931
2932 MVT::ValueType VT = Op.getValueType();
2933 MVT::ValueType EVT = MVT::getVectorElementType(VT);
2934 unsigned EVTBits = MVT::getSizeInBits(EVT);
2935
2936 unsigned NumElems = Op.getNumOperands();
2937 unsigned NumZero = 0;
2938 unsigned NumNonZero = 0;
2939 unsigned NonZeros = 0;
Evan Chengc1073492007-12-12 06:45:40 +00002940 bool HasNonImms = false;
Evan Cheng75184a92007-12-11 01:46:18 +00002941 SmallSet<SDOperand, 8> Values;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002942 for (unsigned i = 0; i < NumElems; ++i) {
2943 SDOperand Elt = Op.getOperand(i);
Evan Chengc1073492007-12-12 06:45:40 +00002944 if (Elt.getOpcode() == ISD::UNDEF)
2945 continue;
2946 Values.insert(Elt);
2947 if (Elt.getOpcode() != ISD::Constant &&
2948 Elt.getOpcode() != ISD::ConstantFP)
2949 HasNonImms = true;
2950 if (isZeroNode(Elt))
2951 NumZero++;
2952 else {
2953 NonZeros |= (1 << i);
2954 NumNonZero++;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002955 }
2956 }
2957
2958 if (NumNonZero == 0) {
Chris Lattnere6aa3862007-11-25 00:24:49 +00002959 // All undef vector. Return an UNDEF. All zero vectors were handled above.
2960 return DAG.getNode(ISD::UNDEF, VT);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002961 }
2962
2963 // Splat is obviously ok. Let legalizer expand it to a shuffle.
2964 if (Values.size() == 1)
2965 return SDOperand();
2966
2967 // Special case for single non-zero element.
Evan Chengc1073492007-12-12 06:45:40 +00002968 if (NumNonZero == 1 && NumElems <= 4) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002969 unsigned Idx = CountTrailingZeros_32(NonZeros);
2970 SDOperand Item = Op.getOperand(Idx);
2971 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item);
2972 if (Idx == 0)
2973 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
2974 return getShuffleVectorZeroOrUndef(Item, VT, NumElems, Idx,
2975 NumZero > 0, DAG);
Evan Chengc1073492007-12-12 06:45:40 +00002976 else if (!HasNonImms) // Otherwise, it's better to do a constpool load.
2977 return SDOperand();
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002978
2979 if (EVTBits == 32) {
2980 // Turn it into a shuffle of zero and zero-extended scalar to vector.
2981 Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0,
2982 DAG);
2983 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2984 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
2985 SmallVector<SDOperand, 8> MaskVec;
2986 for (unsigned i = 0; i < NumElems; i++)
2987 MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT));
2988 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
2989 &MaskVec[0], MaskVec.size());
2990 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item,
2991 DAG.getNode(ISD::UNDEF, VT), Mask);
2992 }
2993 }
2994
Dan Gohman21463242007-07-24 22:55:08 +00002995 // A vector full of immediates; various special cases are already
2996 // handled, so this is best done with a single constant-pool load.
Evan Chengc1073492007-12-12 06:45:40 +00002997 if (!HasNonImms)
Dan Gohman21463242007-07-24 22:55:08 +00002998 return SDOperand();
2999
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003000 // Let legalizer expand 2-wide build_vectors.
3001 if (EVTBits == 64)
3002 return SDOperand();
3003
3004 // If element VT is < 32 bits, convert it to inserts into a zero vector.
3005 if (EVTBits == 8 && NumElems == 16) {
3006 SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
3007 *this);
3008 if (V.Val) return V;
3009 }
3010
3011 if (EVTBits == 16 && NumElems == 8) {
3012 SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
3013 *this);
3014 if (V.Val) return V;
3015 }
3016
3017 // If element VT is == 32 bits, turn it into a number of shuffles.
3018 SmallVector<SDOperand, 8> V;
3019 V.resize(NumElems);
3020 if (NumElems == 4 && NumZero > 0) {
3021 for (unsigned i = 0; i < 4; ++i) {
3022 bool isZero = !(NonZeros & (1 << i));
3023 if (isZero)
3024 V[i] = getZeroVector(VT, DAG);
3025 else
3026 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
3027 }
3028
3029 for (unsigned i = 0; i < 2; ++i) {
3030 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
3031 default: break;
3032 case 0:
3033 V[i] = V[i*2]; // Must be a zero vector.
3034 break;
3035 case 1:
3036 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2],
3037 getMOVLMask(NumElems, DAG));
3038 break;
3039 case 2:
3040 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
3041 getMOVLMask(NumElems, DAG));
3042 break;
3043 case 3:
3044 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
3045 getUnpacklMask(NumElems, DAG));
3046 break;
3047 }
3048 }
3049
3050 // Take advantage of the fact GR32 to VR128 scalar_to_vector (i.e. movd)
3051 // clears the upper bits.
3052 // FIXME: we can do the same for v4f32 case when we know both parts of
3053 // the lower half come from scalar_to_vector (loadf32). We should do
3054 // that in post legalizer dag combiner with target specific hooks.
3055 if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0)
3056 return V[0];
3057 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
3058 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
3059 SmallVector<SDOperand, 8> MaskVec;
3060 bool Reverse = (NonZeros & 0x3) == 2;
3061 for (unsigned i = 0; i < 2; ++i)
3062 if (Reverse)
3063 MaskVec.push_back(DAG.getConstant(1-i, EVT));
3064 else
3065 MaskVec.push_back(DAG.getConstant(i, EVT));
3066 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2;
3067 for (unsigned i = 0; i < 2; ++i)
3068 if (Reverse)
3069 MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT));
3070 else
3071 MaskVec.push_back(DAG.getConstant(i+NumElems, EVT));
3072 SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3073 &MaskVec[0], MaskVec.size());
3074 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask);
3075 }
3076
3077 if (Values.size() > 2) {
3078 // Expand into a number of unpckl*.
3079 // e.g. for v4f32
3080 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
3081 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
3082 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
3083 SDOperand UnpckMask = getUnpacklMask(NumElems, DAG);
3084 for (unsigned i = 0; i < NumElems; ++i)
3085 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
3086 NumElems >>= 1;
3087 while (NumElems != 0) {
3088 for (unsigned i = 0; i < NumElems; ++i)
3089 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems],
3090 UnpckMask);
3091 NumElems >>= 1;
3092 }
3093 return V[0];
3094 }
3095
3096 return SDOperand();
3097}
3098
Evan Chengfca29242007-12-07 08:07:39 +00003099static
3100SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2,
3101 SDOperand PermMask, SelectionDAG &DAG,
3102 TargetLowering &TLI) {
Evan Cheng75184a92007-12-11 01:46:18 +00003103 SDOperand NewV;
Evan Chengfca29242007-12-07 08:07:39 +00003104 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(8);
3105 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
Evan Cheng75184a92007-12-11 01:46:18 +00003106 MVT::ValueType PtrVT = TLI.getPointerTy();
3107 SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(),
3108 PermMask.Val->op_end());
3109
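  // Overall strategy (summary of the code below): find which quadword of the
  // inputs supplies most of the low result words and most of the high result
  // words, gather those quadwords with a v2i64 shuffle, sort each half with
  // PSHUFLW/PSHUFHW where profitable, and patch any remaining out-of-place
  // words with pextrw/pinsrw.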
3110 // First record which half of which vector the low elements come from.
3111 SmallVector<unsigned, 4> LowQuad(4);
3112 for (unsigned i = 0; i < 4; ++i) {
3113 SDOperand Elt = MaskElts[i];
3114 if (Elt.getOpcode() == ISD::UNDEF)
3115 continue;
3116 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3117 int QuadIdx = EltIdx / 4;
3118 ++LowQuad[QuadIdx];
3119 }
3120 int BestLowQuad = -1;
3121 unsigned MaxQuad = 1;
3122 for (unsigned i = 0; i < 4; ++i) {
3123 if (LowQuad[i] > MaxQuad) {
3124 BestLowQuad = i;
3125 MaxQuad = LowQuad[i];
3126 }
Evan Chengfca29242007-12-07 08:07:39 +00003127 }
3128
Evan Cheng75184a92007-12-11 01:46:18 +00003129 // Record which half of which vector the high elements come from.
3130 SmallVector<unsigned, 4> HighQuad(4);
3131 for (unsigned i = 4; i < 8; ++i) {
3132 SDOperand Elt = MaskElts[i];
3133 if (Elt.getOpcode() == ISD::UNDEF)
3134 continue;
3135 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3136 int QuadIdx = EltIdx / 4;
3137 ++HighQuad[QuadIdx];
3138 }
3139 int BestHighQuad = -1;
3140 MaxQuad = 1;
3141 for (unsigned i = 0; i < 4; ++i) {
3142 if (HighQuad[i] > MaxQuad) {
3143 BestHighQuad = i;
3144 MaxQuad = HighQuad[i];
3145 }
3146 }
3147
3148 // If it's possible to sort parts of either half with PSHUF{H|L}W, then do it.
3149 if (BestLowQuad != -1 || BestHighQuad != -1) {
3150 // First sort the 4 chunks in order using shufpd.
3151 SmallVector<SDOperand, 8> MaskVec;
3152 if (BestLowQuad != -1)
3153 MaskVec.push_back(DAG.getConstant(BestLowQuad, MVT::i32));
3154 else
3155 MaskVec.push_back(DAG.getConstant(0, MVT::i32));
3156 if (BestHighQuad != -1)
3157 MaskVec.push_back(DAG.getConstant(BestHighQuad, MVT::i32));
3158 else
3159 MaskVec.push_back(DAG.getConstant(1, MVT::i32));
3160 SDOperand Mask= DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec[0],2);
3161 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64,
3162 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V1),
3163 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V2), Mask);
3164 NewV = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, NewV);
3165
3166 // Now sort high and low parts separately.
3167 BitVector InOrder(8);
3168 if (BestLowQuad != -1) {
3169 // Sort lower half in order using PSHUFLW.
3170 MaskVec.clear();
3171 bool AnyOutOrder = false;
3172 for (unsigned i = 0; i != 4; ++i) {
3173 SDOperand Elt = MaskElts[i];
3174 if (Elt.getOpcode() == ISD::UNDEF) {
3175 MaskVec.push_back(Elt);
3176 InOrder.set(i);
3177 } else {
3178 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3179 if (EltIdx != i)
3180 AnyOutOrder = true;
3181 MaskVec.push_back(DAG.getConstant(EltIdx % 4, MaskEVT));
3182 // If this element is in the right place after this shuffle, then
3183 // remember it.
3184 if ((int)(EltIdx / 4) == BestLowQuad)
3185 InOrder.set(i);
3186 }
3187 }
3188 if (AnyOutOrder) {
3189 for (unsigned i = 4; i != 8; ++i)
3190 MaskVec.push_back(DAG.getConstant(i, MaskEVT));
3191 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
3192 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask);
3193 }
3194 }
3195
3196 if (BestHighQuad != -1) {
3197 // Sort high half in order using PSHUFHW if possible.
3198 MaskVec.clear();
3199 for (unsigned i = 0; i != 4; ++i)
3200 MaskVec.push_back(DAG.getConstant(i, MaskEVT));
3201 bool AnyOutOrder = false;
3202 for (unsigned i = 4; i != 8; ++i) {
3203 SDOperand Elt = MaskElts[i];
3204 if (Elt.getOpcode() == ISD::UNDEF) {
3205 MaskVec.push_back(Elt);
3206 InOrder.set(i);
3207 } else {
3208 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3209 if (EltIdx != i)
3210 AnyOutOrder = true;
3211 MaskVec.push_back(DAG.getConstant((EltIdx % 4) + 4, MaskEVT));
3212 // If this element is in the right place after this shuffle, then
3213 // remember it.
3214 if ((int)(EltIdx / 4) == BestHighQuad)
3215 InOrder.set(i);
3216 }
3217 }
3218 if (AnyOutOrder) {
3219 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
3220 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask);
3221 }
3222 }
3223
3224 // The other elements are put in the right place using pextrw and pinsrw.
3225 for (unsigned i = 0; i != 8; ++i) {
3226 if (InOrder[i])
3227 continue;
3228 SDOperand Elt = MaskElts[i];
3229 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3230 if (EltIdx == i)
3231 continue;
3232 SDOperand ExtOp = (EltIdx < 8)
3233 ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1,
3234 DAG.getConstant(EltIdx, PtrVT))
3235 : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2,
3236 DAG.getConstant(EltIdx - 8, PtrVT));
3237 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
3238 DAG.getConstant(i, PtrVT));
3239 }
3240 return NewV;
3241 }
3242
3243 // PSHUF{H|L}W are not used. Lower into extracts and inserts but try to use
3244 // as few as possible.
Evan Chengfca29242007-12-07 08:07:39 +00003245 // First, let's find out how many elements are already in the right order.
3246 unsigned V1InOrder = 0;
3247 unsigned V1FromV1 = 0;
3248 unsigned V2InOrder = 0;
3249 unsigned V2FromV2 = 0;
Evan Cheng75184a92007-12-11 01:46:18 +00003250 SmallVector<SDOperand, 8> V1Elts;
3251 SmallVector<SDOperand, 8> V2Elts;
Evan Chengfca29242007-12-07 08:07:39 +00003252 for (unsigned i = 0; i < 8; ++i) {
Evan Cheng75184a92007-12-11 01:46:18 +00003253 SDOperand Elt = MaskElts[i];
Evan Chengfca29242007-12-07 08:07:39 +00003254 if (Elt.getOpcode() == ISD::UNDEF) {
Evan Cheng75184a92007-12-11 01:46:18 +00003255 V1Elts.push_back(Elt);
3256 V2Elts.push_back(Elt);
Evan Chengfca29242007-12-07 08:07:39 +00003257 ++V1InOrder;
3258 ++V2InOrder;
Evan Cheng75184a92007-12-11 01:46:18 +00003259 continue;
3260 }
3261 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3262 if (EltIdx == i) {
3263 V1Elts.push_back(Elt);
3264 V2Elts.push_back(DAG.getConstant(i+8, MaskEVT));
3265 ++V1InOrder;
3266 } else if (EltIdx == i+8) {
3267 V1Elts.push_back(Elt);
3268 V2Elts.push_back(DAG.getConstant(i, MaskEVT));
3269 ++V2InOrder;
3270 } else if (EltIdx < 8) {
3271 V1Elts.push_back(Elt);
3272 ++V1FromV1;
Evan Chengfca29242007-12-07 08:07:39 +00003273 } else {
Evan Cheng75184a92007-12-11 01:46:18 +00003274 V2Elts.push_back(DAG.getConstant(EltIdx-8, MaskEVT));
3275 ++V2FromV2;
Evan Chengfca29242007-12-07 08:07:39 +00003276 }
3277 }
3278
3279 if (V2InOrder > V1InOrder) {
3280 PermMask = CommuteVectorShuffleMask(PermMask, DAG);
3281 std::swap(V1, V2);
3282 std::swap(V1Elts, V2Elts);
3283 std::swap(V1FromV1, V2FromV2);
3284 }
3285
Evan Cheng75184a92007-12-11 01:46:18 +00003286 if ((V1FromV1 + V1InOrder) != 8) {
3287 // Some elements are from V2.
3288 if (V1FromV1) {
3289 // If there are elements that are from V1 but out of place,
3290 // then first sort them in place
3291 SmallVector<SDOperand, 8> MaskVec;
3292 for (unsigned i = 0; i < 8; ++i) {
3293 SDOperand Elt = V1Elts[i];
3294 if (Elt.getOpcode() == ISD::UNDEF) {
3295 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
3296 continue;
3297 }
3298 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3299 if (EltIdx >= 8)
3300 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
3301 else
3302 MaskVec.push_back(DAG.getConstant(EltIdx, MaskEVT));
3303 }
3304 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
3305 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, V1, V1, Mask);
Evan Chengfca29242007-12-07 08:07:39 +00003306 }
Evan Cheng75184a92007-12-11 01:46:18 +00003307
3308 NewV = V1;
3309 for (unsigned i = 0; i < 8; ++i) {
3310 SDOperand Elt = V1Elts[i];
3311 if (Elt.getOpcode() == ISD::UNDEF)
3312 continue;
3313 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3314 if (EltIdx < 8)
3315 continue;
3316 SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2,
3317 DAG.getConstant(EltIdx - 8, PtrVT));
3318 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
3319 DAG.getConstant(i, PtrVT));
3320 }
3321 return NewV;
3322 } else {
3323 // All elements are from V1.
3324 NewV = V1;
3325 for (unsigned i = 0; i < 8; ++i) {
3326 SDOperand Elt = V1Elts[i];
3327 if (Elt.getOpcode() == ISD::UNDEF)
3328 continue;
3329 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3330 SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1,
3331 DAG.getConstant(EltIdx, PtrVT));
3332 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
3333 DAG.getConstant(i, PtrVT));
3334 }
3335 return NewV;
3336 }
3337}
3338
Evan Cheng15e8f5a2007-12-15 03:00:47 +00003339/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
3340/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
3341/// done when every pair / quad of shuffle mask elements point to elements in
3342/// the right sequence. e.g.
Evan Cheng75184a92007-12-11 01:46:18 +00003343/// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15>
3344static
Evan Cheng15e8f5a2007-12-15 03:00:47 +00003345SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2,
3346 MVT::ValueType VT,
Evan Cheng75184a92007-12-11 01:46:18 +00003347 SDOperand PermMask, SelectionDAG &DAG,
3348 TargetLowering &TLI) {
3349 unsigned NumElems = PermMask.getNumOperands();
Evan Cheng15e8f5a2007-12-15 03:00:47 +00003350 unsigned NewWidth = (NumElems == 4) ? 2 : 4;
3351 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NewWidth);
3352 MVT::ValueType NewVT = MaskVT;
3353 switch (VT) {
3354 case MVT::v4f32: NewVT = MVT::v2f64; break;
3355 case MVT::v4i32: NewVT = MVT::v2i64; break;
3356 case MVT::v8i16: NewVT = MVT::v4i32; break;
3357 case MVT::v16i8: NewVT = MVT::v4i32; break;
3358 default: assert(false && "Unexpected!");
3359 }
3360
3361 if (NewWidth == 2)
3362 if (MVT::isInteger(VT))
3363 NewVT = MVT::v2i64;
3364 else
3365 NewVT = MVT::v2f64;
3366 unsigned Scale = NumElems / NewWidth;
3367 SmallVector<SDOperand, 8> MaskVec;
Evan Cheng75184a92007-12-11 01:46:18 +00003368 for (unsigned i = 0; i < NumElems; i += Scale) {
3369 unsigned StartIdx = ~0U;
3370 for (unsigned j = 0; j < Scale; ++j) {
3371 SDOperand Elt = PermMask.getOperand(i+j);
3372 if (Elt.getOpcode() == ISD::UNDEF)
3373 continue;
3374 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3375 if (StartIdx == ~0U)
3376 StartIdx = EltIdx - (EltIdx % Scale);
3377 if (EltIdx != StartIdx + j)
3378 return SDOperand();
3379 }
3380 if (StartIdx == ~0U)
3381 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MVT::i32));
3382 else
3383 MaskVec.push_back(DAG.getConstant(StartIdx / Scale, MVT::i32));
Evan Chengfca29242007-12-07 08:07:39 +00003384 }
3385
Evan Cheng15e8f5a2007-12-15 03:00:47 +00003386 V1 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V1);
3387 V2 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V2);
3388 return DAG.getNode(ISD::VECTOR_SHUFFLE, NewVT, V1, V2,
3389 DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3390 &MaskVec[0], MaskVec.size()));
Evan Chengfca29242007-12-07 08:07:39 +00003391}
3392
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003393SDOperand
3394X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
3395 SDOperand V1 = Op.getOperand(0);
3396 SDOperand V2 = Op.getOperand(1);
3397 SDOperand PermMask = Op.getOperand(2);
3398 MVT::ValueType VT = Op.getValueType();
3399 unsigned NumElems = PermMask.getNumOperands();
3400 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
3401 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
3402 bool V1IsSplat = false;
3403 bool V2IsSplat = false;
3404
3405 if (isUndefShuffle(Op.Val))
3406 return DAG.getNode(ISD::UNDEF, VT);
3407
3408 if (isZeroShuffle(Op.Val))
3409 return getZeroVector(VT, DAG);
3410
3411 if (isIdentityMask(PermMask.Val))
3412 return V1;
3413 else if (isIdentityMask(PermMask.Val, true))
3414 return V2;
3415
3416 if (isSplatMask(PermMask.Val)) {
3417 if (NumElems <= 4) return Op;
3418 // Promote it to a v4i32 splat.
3419 return PromoteSplat(Op, DAG);
3420 }
3421
Evan Cheng15e8f5a2007-12-15 03:00:47 +00003422 // If the shuffle can be profitably rewritten as a narrower shuffle, then
3423 // do it!
3424 if (VT == MVT::v8i16 || VT == MVT::v16i8) {
3425 SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this);
3426 if (NewOp.Val)
3427 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
3428 } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
3429 // FIXME: Figure out a cleaner way to do this.
3430 // Try to make use of movq to zero out the top part.
3431 if (ISD::isBuildVectorAllZeros(V2.Val)) {
3432 SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this);
3433 if (NewOp.Val) {
3434 SDOperand NewV1 = NewOp.getOperand(0);
3435 SDOperand NewV2 = NewOp.getOperand(1);
3436 SDOperand NewMask = NewOp.getOperand(2);
3437 if (isCommutedMOVL(NewMask.Val, true, false)) {
3438 NewOp = CommuteVectorShuffle(NewOp, NewV1, NewV2, NewMask, DAG);
3439 NewOp = DAG.getNode(ISD::VECTOR_SHUFFLE, NewOp.getValueType(),
3440 NewV1, NewV2, getMOVLMask(2, DAG));
3441 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
3442 }
3443 }
3444 } else if (ISD::isBuildVectorAllZeros(V1.Val)) {
3445 SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this);
3446 if (NewOp.Val && X86::isMOVLMask(NewOp.getOperand(2).Val))
3447 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
3448 }
3449 }
3450
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003451 if (X86::isMOVLMask(PermMask.Val))
3452 return (V1IsUndef) ? V2 : Op;
3453
3454 if (X86::isMOVSHDUPMask(PermMask.Val) ||
3455 X86::isMOVSLDUPMask(PermMask.Val) ||
3456 X86::isMOVHLPSMask(PermMask.Val) ||
3457 X86::isMOVHPMask(PermMask.Val) ||
3458 X86::isMOVLPMask(PermMask.Val))
3459 return Op;
3460
3461 if (ShouldXformToMOVHLPS(PermMask.Val) ||
3462 ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val))
3463 return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3464
3465 bool Commuted = false;
Chris Lattnere6aa3862007-11-25 00:24:49 +00003466 // FIXME: This should also accept a bitcast of a splat? Be careful, not
3467 // 1,1,1,1 -> v8i16 though.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003468 V1IsSplat = isSplatVector(V1.Val);
3469 V2IsSplat = isSplatVector(V2.Val);
Chris Lattnere6aa3862007-11-25 00:24:49 +00003470
3471 // Canonicalize the splat or undef, if present, to be on the RHS.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003472 if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
3473 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3474 std::swap(V1IsSplat, V2IsSplat);
3475 std::swap(V1IsUndef, V2IsUndef);
3476 Commuted = true;
3477 }
3478
Evan Cheng15e8f5a2007-12-15 03:00:47 +00003479 // FIXME: Figure out a cleaner way to do this.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003480 if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) {
3481 if (V2IsUndef) return V1;
3482 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3483 if (V2IsSplat) {
3484 // V2 is a splat, so the mask may be malformed. That is, it may point
3485 // to any V2 element. The instruction selector won't like this. Get
3486 // a corrected mask and commute to form a proper MOVS{S|D}.
3487 SDOperand NewMask = getMOVLMask(NumElems, DAG);
3488 if (NewMask.Val != PermMask.Val)
3489 Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3490 }
3491 return Op;
3492 }
3493
3494 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
3495 X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
3496 X86::isUNPCKLMask(PermMask.Val) ||
3497 X86::isUNPCKHMask(PermMask.Val))
3498 return Op;
3499
3500 if (V2IsSplat) {
3501 // Normalize mask so all entries that point to V2 point to its first
3502 // element, then try to match unpck{h|l} again. If it matches, return a
3503 // new vector_shuffle with the corrected mask.
3504 SDOperand NewMask = NormalizeMask(PermMask, DAG);
3505 if (NewMask.Val != PermMask.Val) {
3506 if (X86::isUNPCKLMask(PermMask.Val, true)) {
3507 SDOperand NewMask = getUnpacklMask(NumElems, DAG);
3508 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3509 } else if (X86::isUNPCKHMask(PermMask.Val, true)) {
3510 SDOperand NewMask = getUnpackhMask(NumElems, DAG);
3511 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3512 }
3513 }
3514 }
3515
3516 // Normalize the node to match x86 shuffle ops if needed
3517 if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val))
3518 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3519
3520 if (Commuted) {
3521 // Commute it back and try unpck* again.
3522 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3523 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
3524 X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
3525 X86::isUNPCKLMask(PermMask.Val) ||
3526 X86::isUNPCKHMask(PermMask.Val))
3527 return Op;
3528 }
3529
3530 // If VT is integer, try PSHUF* first, then SHUFP*.
3531 if (MVT::isInteger(VT)) {
Dan Gohman7dc19012007-08-02 21:17:01 +00003532 // MMX doesn't have PSHUFD; it does have PSHUFW. While it's theoretically
3533 // possible to shuffle a v2i32 using PSHUFW, that's not yet implemented.
3534 if (((MVT::getSizeInBits(VT) != 64 || NumElems == 4) &&
3535 X86::isPSHUFDMask(PermMask.Val)) ||
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003536 X86::isPSHUFHWMask(PermMask.Val) ||
3537 X86::isPSHUFLWMask(PermMask.Val)) {
3538 if (V2.getOpcode() != ISD::UNDEF)
3539 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
3540 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
3541 return Op;
3542 }
3543
3544 if (X86::isSHUFPMask(PermMask.Val) &&
3545 MVT::getSizeInBits(VT) != 64) // Don't do this for MMX.
3546 return Op;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003547 } else {
3548 // Floating point cases in the other order.
3549 if (X86::isSHUFPMask(PermMask.Val))
3550 return Op;
3551 if (X86::isPSHUFDMask(PermMask.Val) ||
3552 X86::isPSHUFHWMask(PermMask.Val) ||
3553 X86::isPSHUFLWMask(PermMask.Val)) {
3554 if (V2.getOpcode() != ISD::UNDEF)
3555 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
3556 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
3557 return Op;
3558 }
3559 }
3560
Evan Cheng75184a92007-12-11 01:46:18 +00003561 // Handle v8i16 specifically since SSE can do word extraction and insertion.
3562 if (VT == MVT::v8i16) {
3563 SDOperand NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this);
3564 if (NewOp.Val)
3565 return NewOp;
3566 }
3567
3568 // Handle all 4 wide cases with a number of shuffles.
3569 if (NumElems == 4 && MVT::getSizeInBits(VT) != 64) {
Evan Chengfca29242007-12-07 08:07:39 +00003570 // Don't do this for MMX.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003571 MVT::ValueType MaskVT = PermMask.getValueType();
3572 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
3573 SmallVector<std::pair<int, int>, 8> Locs;
3574 Locs.reserve(NumElems);
Evan Cheng75184a92007-12-11 01:46:18 +00003575 SmallVector<SDOperand, 8> Mask1(NumElems,
3576 DAG.getNode(ISD::UNDEF, MaskEVT));
3577 SmallVector<SDOperand, 8> Mask2(NumElems,
3578 DAG.getNode(ISD::UNDEF, MaskEVT));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003579 unsigned NumHi = 0;
3580 unsigned NumLo = 0;
3581 // If no more than two elements come from either vector, this can be
3582 // implemented with two shuffles. The first shuffle gathers the elements.
3583 // The second shuffle, which takes the first shuffle as both of its
3584 // vector operands, puts the elements into the right order.
3585 for (unsigned i = 0; i != NumElems; ++i) {
3586 SDOperand Elt = PermMask.getOperand(i);
3587 if (Elt.getOpcode() == ISD::UNDEF) {
3588 Locs[i] = std::make_pair(-1, -1);
3589 } else {
3590 unsigned Val = cast<ConstantSDNode>(Elt)->getValue();
3591 if (Val < NumElems) {
3592 Locs[i] = std::make_pair(0, NumLo);
3593 Mask1[NumLo] = Elt;
3594 NumLo++;
3595 } else {
3596 Locs[i] = std::make_pair(1, NumHi);
3597 if (2+NumHi < NumElems)
3598 Mask1[2+NumHi] = Elt;
3599 NumHi++;
3600 }
3601 }
3602 }
3603 if (NumLo <= 2 && NumHi <= 2) {
3604 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
3605 DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3606 &Mask1[0], Mask1.size()));
3607 for (unsigned i = 0; i != NumElems; ++i) {
3608 if (Locs[i].first == -1)
3609 continue;
3610 else {
3611 unsigned Idx = (i < NumElems/2) ? 0 : NumElems;
3612 Idx += Locs[i].first * (NumElems/2) + Locs[i].second;
3613 Mask2[i] = DAG.getConstant(Idx, MaskEVT);
3614 }
3615 }
3616
3617 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1,
3618 DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3619 &Mask2[0], Mask2.size()));
3620 }
3621
3622 // Break it into (shuffle shuffle_hi, shuffle_lo).
3623 Locs.clear();
3624 SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
3625 SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
3626 SmallVector<SDOperand,8> *MaskPtr = &LoMask;
3627 unsigned MaskIdx = 0;
3628 unsigned LoIdx = 0;
3629 unsigned HiIdx = NumElems/2;
3630 for (unsigned i = 0; i != NumElems; ++i) {
3631 if (i == NumElems/2) {
3632 MaskPtr = &HiMask;
3633 MaskIdx = 1;
3634 LoIdx = 0;
3635 HiIdx = NumElems/2;
3636 }
3637 SDOperand Elt = PermMask.getOperand(i);
3638 if (Elt.getOpcode() == ISD::UNDEF) {
3639 Locs[i] = std::make_pair(-1, -1);
3640 } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) {
3641 Locs[i] = std::make_pair(MaskIdx, LoIdx);
3642 (*MaskPtr)[LoIdx] = Elt;
3643 LoIdx++;
3644 } else {
3645 Locs[i] = std::make_pair(MaskIdx, HiIdx);
3646 (*MaskPtr)[HiIdx] = Elt;
3647 HiIdx++;
3648 }
3649 }
3650
3651 SDOperand LoShuffle =
3652 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
3653 DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3654 &LoMask[0], LoMask.size()));
3655 SDOperand HiShuffle =
3656 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
3657 DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3658 &HiMask[0], HiMask.size()));
3659 SmallVector<SDOperand, 8> MaskOps;
3660 for (unsigned i = 0; i != NumElems; ++i) {
3661 if (Locs[i].first == -1) {
3662 MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
3663 } else {
3664 unsigned Idx = Locs[i].first * NumElems + Locs[i].second;
3665 MaskOps.push_back(DAG.getConstant(Idx, MaskEVT));
3666 }
3667 }
3668 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle,
3669 DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3670 &MaskOps[0], MaskOps.size()));
3671 }
3672
3673 return SDOperand();
3674}
3675
3676SDOperand
Nate Begemand77e59e2008-02-11 04:19:36 +00003677X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDOperand Op,
3678 SelectionDAG &DAG) {
3679 MVT::ValueType VT = Op.getValueType();
3680 if (MVT::getSizeInBits(VT) == 8) {
3681 SDOperand Extract = DAG.getNode(X86ISD::PEXTRB, MVT::i32,
3682 Op.getOperand(0), Op.getOperand(1));
3683 SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract,
3684 DAG.getValueType(VT));
3685 return DAG.getNode(ISD::TRUNCATE, VT, Assert);
3686 } else if (MVT::getSizeInBits(VT) == 16) {
3687 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, MVT::i32,
3688 Op.getOperand(0), Op.getOperand(1));
3689 SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract,
3690 DAG.getValueType(VT));
3691 return DAG.getNode(ISD::TRUNCATE, VT, Assert);
3692 }
3693 return SDOperand();
3694}
3695
3696
3697SDOperand
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003698X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
3699 if (!isa<ConstantSDNode>(Op.getOperand(1)))
3700 return SDOperand();
3701
Nate Begemand77e59e2008-02-11 04:19:36 +00003702 if (Subtarget->hasSSE41())
3703 return LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
3704
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003705 MVT::ValueType VT = Op.getValueType();
3706 // TODO: handle v16i8.
3707 if (MVT::getSizeInBits(VT) == 16) {
Evan Cheng75184a92007-12-11 01:46:18 +00003708 SDOperand Vec = Op.getOperand(0);
3709 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
3710 if (Idx == 0)
3711 return DAG.getNode(ISD::TRUNCATE, MVT::i16,
3712 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32,
3713 DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Vec),
3714 Op.getOperand(1)));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003715 // Transform it so it matches pextrw which produces a 32-bit result.
3716 MVT::ValueType EVT = (MVT::ValueType)(VT+1);
3717 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT,
3718 Op.getOperand(0), Op.getOperand(1));
3719 SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract,
3720 DAG.getValueType(VT));
3721 return DAG.getNode(ISD::TRUNCATE, VT, Assert);
3722 } else if (MVT::getSizeInBits(VT) == 32) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003723 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
3724 if (Idx == 0)
3725 return Op;
3726 // SHUFPS the element to the lowest double word, then movss.
3727 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
3728 SmallVector<SDOperand, 8> IdxVec;
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00003729 IdxVec.
3730 push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT)));
3731 IdxVec.
3732 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
3733 IdxVec.
3734 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
3735 IdxVec.
3736 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003737 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3738 &IdxVec[0], IdxVec.size());
Evan Cheng75184a92007-12-11 01:46:18 +00003739 SDOperand Vec = Op.getOperand(0);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003740 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
3741 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
3742 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
Chris Lattner5872a362008-01-17 07:00:52 +00003743 DAG.getIntPtrConstant(0));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003744 } else if (MVT::getSizeInBits(VT) == 64) {
Nate Begemand77e59e2008-02-11 04:19:36 +00003745 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
3746 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
3747 // to match extract_elt for f64.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003748 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
3749 if (Idx == 0)
3750 return Op;
3751
3752 // UNPCKHPD the element to the lowest double word, then movsd.
3753 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
3754 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
3755 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
3756 SmallVector<SDOperand, 8> IdxVec;
3757 IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT)));
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00003758 IdxVec.
3759 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003760 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3761 &IdxVec[0], IdxVec.size());
Evan Cheng75184a92007-12-11 01:46:18 +00003762 SDOperand Vec = Op.getOperand(0);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003763 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
3764 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
3765 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
Chris Lattner5872a362008-01-17 07:00:52 +00003766 DAG.getIntPtrConstant(0));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003767 }
3768
3769 return SDOperand();
3770}

SDOperand
X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType EVT = MVT::getVectorElementType(VT);

  SDOperand N0 = Op.getOperand(0);
  SDOperand N1 = Op.getOperand(1);
  SDOperand N2 = Op.getOperand(2);

  if ((MVT::getSizeInBits(EVT) == 8) || (MVT::getSizeInBits(EVT) == 16)) {
    unsigned Opc = (MVT::getSizeInBits(EVT) == 8) ? X86ISD::PINSRB
                                                  : X86ISD::PINSRW;
    // Transform it so it matches pinsr{b,w}, which expects a GR32 as its
    // second argument.
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue());
    return DAG.getNode(Opc, VT, N0, N1, N2);
  } else if (EVT == MVT::f32) {
    // Bits [7:6] of the constant are the source select. This will always be
    // zero here. The DAG Combiner may combine an extract_elt index into these
    // bits. For example (insert (extract, 3), 2) could be matched by putting
    // the '3' into bits [7:6] of X86ISD::INSERTPS.
    // Bits [5:4] of the constant are the destination select. This is the
    // value of the incoming immediate.
    // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
    // combine either bitwise AND or insert of float 0.0 to set these bits.
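    // For example, an insert into element 2 with no zeroing yields an
    // immediate of 2 << 4 == 0x20: source select and zero mask both zero.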
    N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue() << 4);
    return DAG.getNode(X86ISD::INSERTPS, VT, N0, N1, N2);
  }
  return SDOperand();
}

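/// LowerINSERT_VECTOR_ELT - Insert a scalar into a vector element. Without
/// SSE4.1 only i16 elements are custom lowered here (via pinsrw); other
/// element types are left to the default expansion.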
SDOperand
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType EVT = MVT::getVectorElementType(VT);

  if (Subtarget->hasSSE41())
    return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);

  if (EVT == MVT::i8)
    return SDOperand();

  SDOperand N0 = Op.getOperand(0);
  SDOperand N1 = Op.getOperand(1);
  SDOperand N2 = Op.getOperand(2);

  if (MVT::getSizeInBits(EVT) == 16) {
    // Transform it so it matches pinsrw, which expects a 16-bit value in a
    // GR32 as its second argument.
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue());
    return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2);
  }
  return SDOperand();
}

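/// LowerSCALAR_TO_VECTOR - Any-extend the scalar to i32, build the
/// scalar_to_vector in an i32 vector type (v4i32 for 128-bit results,
/// v2i32 otherwise), and bitcast to the requested vector type.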
SDOperand
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
  MVT::ValueType VT = MVT::v2i32;
  switch (Op.getValueType()) {
  default: break;
  case MVT::v16i8:
  case MVT::v8i16:
    VT = MVT::v4i32;
    break;
  }
  return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(),
                     DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, AnyExt));
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form an addressing mode. These wrapped nodes will be selected
// into MOV32ri.
SDOperand
X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(),
                                               getPointerTy(),
                                               CP->getAlignment());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

SDOperand
X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy());
  // If it's a debug information descriptor, don't mess with it.
  if (DAG.isVerifiedDebugInfoDesc(Op))
    return Result;
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  // For Darwin & Mingw32, external and weak symbols are indirect, so we want
  // to load the value at address GV, not the value of GV itself. This means
  // that the GlobalAddress must be in the base or index register of the
  // address, not the GV offset field. The platform check is inside the
  // GVRequiresExtraLoad() call. The same applies for external symbols during
  // PIC codegen.
  if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false))
    Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result,
                         PseudoSourceValue::getGOT(), 0);

  return Result;
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model.
static SDOperand
LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                              const MVT::ValueType PtrVT) {
  SDOperand InFlag;
  SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX,
                                     DAG.getNode(X86ISD::GlobalBaseReg,
                                                 PtrVT), InFlag);
  InFlag = Chain.getValue(1);

  // emit leal symbol@TLSGD(,%ebx,1), %eax
  SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag);
  SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
                                             GA->getValueType(0),
                                             GA->getOffset());
  SDOperand Ops[] = { Chain, TGA, InFlag };
  SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3);
  InFlag = Result.getValue(2);
  Chain = Result.getValue(1);

  // call ___tls_get_addr. This function receives its argument in
  // the register EAX.
  Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag);
  InFlag = Chain.getValue(1);

  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SDOperand Ops1[] = { Chain,
                       DAG.getTargetExternalSymbol("___tls_get_addr",
                                                   PtrVT),
                       DAG.getRegister(X86::EAX, PtrVT),
                       DAG.getRegister(X86::EBX, PtrVT),
                       InFlag };
  Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5);
  InFlag = Chain.getValue(1);

  return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag);
}

// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
// "local exec" model.
static SDOperand
LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                    const MVT::ValueType PtrVT) {
  // Get the Thread Pointer.
  SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT);
  // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax"
  // (initial exec)
  SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
                                             GA->getValueType(0),
                                             GA->getOffset());
  SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA);

  if (GA->getGlobal()->isDeclaration()) // initial exec TLS model
    Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset,
                         PseudoSourceValue::getGOT(), 0);

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset);
}

SDOperand
X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) {
  // TODO: implement the "local dynamic" model
  // TODO: implement the "initial exec" model for pic executables
  assert(!Subtarget->is64Bit() && Subtarget->isTargetELF() &&
         "TLS not implemented for non-ELF and 64-bit targets");
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  // If the relocation model is PIC, use the "General Dynamic" TLS model;
  // otherwise use the "Local Exec" TLS model.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
    return LowerToTLSGeneralDynamicModel(GA, DAG, getPointerTy());
  else
    return LowerToTLSExecModel(GA, DAG, getPointerTy());
}

SDOperand
X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
  SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and
/// take a 2 x i32 value to shift plus a shift amount.
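/// For SHL_PARTS, a sketch of what the selected code computes (the x86 shift
/// amount is implicitly taken modulo 32):
///   if (Amt & 32) { Hi = Lo << Amt; Lo = 0; }
///   else          { Hi = shld(Hi, Lo, Amt); Lo = Lo << Amt; }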
SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
         "Not an i64 shift!");
  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
  SDOperand ShOpLo = Op.getOperand(0);
  SDOperand ShOpHi = Op.getOperand(1);
  SDOperand ShAmt = Op.getOperand(2);
  SDOperand Tmp1 = isSRA ?
    DAG.getNode(ISD::SRA, MVT::i32, ShOpHi, DAG.getConstant(31, MVT::i8)) :
    DAG.getConstant(0, MVT::i32);

  SDOperand Tmp2, Tmp3;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt);
  } else {
    Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt);
    Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt);
  }

  const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
  SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt,
                                  DAG.getConstant(32, MVT::i8));
  SDOperand Cond = DAG.getNode(X86ISD::CMP, MVT::i32,
                               AndNode, DAG.getConstant(0, MVT::i8));

  SDOperand Hi, Lo;
  SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8);
  VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag);
  SmallVector<SDOperand, 4> Ops;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Ops.push_back(Tmp2);
    Ops.push_back(Tmp3);
    Ops.push_back(CC);
    Ops.push_back(Cond);
    Hi = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size());

    Ops.clear();
    Ops.push_back(Tmp3);
    Ops.push_back(Tmp1);
    Ops.push_back(CC);
    Ops.push_back(Cond);
    Lo = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size());
  } else {
    Ops.push_back(Tmp2);
    Ops.push_back(Tmp3);
    Ops.push_back(CC);
    Ops.push_back(Cond);
    Lo = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size());

    Ops.clear();
    Ops.push_back(Tmp3);
    Ops.push_back(Tmp1);
    Ops.push_back(CC);
    Ops.push_back(Cond);
    Hi = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size());
  }

  VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32);
  Ops.clear();
  Ops.push_back(Lo);
  Ops.push_back(Hi);
  return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size());
}

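/// LowerSINT_TO_FP - Lower i16/i32/i64 sint_to_fp by spilling the integer to
/// a stack slot and reloading it with FILD; when the result belongs in an
/// SSE register, the x87 value is stored and reloaded once more to move it
/// into XMM.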
SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getOperand(0).getValueType() <= MVT::i64 &&
         Op.getOperand(0).getValueType() >= MVT::i16 &&
         "Unknown SINT_TO_FP to lower!");

  SDOperand Result;
  MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
  unsigned Size = MVT::getSizeInBits(SrcVT)/8;
  MachineFunction &MF = DAG.getMachineFunction();
  int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0),
                                 StackSlot,
                                 PseudoSourceValue::getFixedStack(),
                                 SSFI);

  // These are really Legal; caller falls through into that case.
  if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
    return Result;
  if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 &&
      Subtarget->is64Bit())
    return Result;

  // Build the FILD
  SDVTList Tys;
  bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
  if (useSSE)
    Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
  else
    Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(StackSlot);
  Ops.push_back(DAG.getValueType(SrcVT));
  Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD,
                       Tys, &Ops[0], Ops.size());

  if (useSSE) {
    Chain = Result.getValue(1);
    SDOperand InFlag = Result.getValue(2);

    // FIXME: Currently the FST is flagged to the FILD_FLAG. This
    // shouldn't be necessary except that RFP cannot be live across
    // multiple blocks. When stackifier is fixed, they can be uncoupled.
    MachineFunction &MF = DAG.getMachineFunction();
    int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
    SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
    Tys = DAG.getVTList(MVT::Other);
    SmallVector<SDOperand, 8> Ops;
    Ops.push_back(Chain);
    Ops.push_back(Result);
    Ops.push_back(StackSlot);
    Ops.push_back(DAG.getValueType(Op.getValueType()));
    Ops.push_back(InFlag);
    Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
    Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot,
                         PseudoSourceValue::getFixedStack(), SSFI);
  }

  return Result;
}

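/// FP_TO_SINTHelper - Common code for LowerFP_TO_SINT and ExpandFP_TO_SINT:
/// build the FP_TO_INT*_IN_MEM node and return it together with the stack
/// slot it stores through, or a null pair when the operation is really
/// Legal.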
std::pair<SDOperand,SDOperand> X86TargetLowering::
FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
         "Unknown FP_TO_SINT to lower!");

  // These are really Legal.
  if (Op.getValueType() == MVT::i32 &&
      isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
    return std::make_pair(SDOperand(), SDOperand());
  if (Subtarget->is64Bit() &&
      Op.getValueType() == MVT::i64 &&
      Op.getOperand(0).getValueType() != MVT::f80)
    return std::make_pair(SDOperand(), SDOperand());

  // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
  // stack slot.
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
  int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  unsigned Opc;
  switch (Op.getValueType()) {
  default: assert(0 && "Invalid FP_TO_SINT to lower!");
  case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
  case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
  case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
  }

  SDOperand Chain = DAG.getEntryNode();
  SDOperand Value = Op.getOperand(0);
  if (isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) {
    assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
    Chain = DAG.getStore(Chain, Value, StackSlot,
                         PseudoSourceValue::getFixedStack(), SSFI);
    SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
    SDOperand Ops[] = {
      Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType())
    };
    Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3);
    Chain = Value.getValue(1);
    SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
    StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  }

  // Build the FP_TO_INT*_IN_MEM
  SDOperand Ops[] = { Chain, Value, StackSlot };
  SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3);

  return std::make_pair(FIST, StackSlot);
}

SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
  std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(Op, DAG);
  SDOperand FIST = Vals.first, StackSlot = Vals.second;
  if (FIST.Val == 0) return SDOperand();

  // Load the result.
  return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0);
}

SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) {
  std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(SDOperand(N, 0), DAG);
  SDOperand FIST = Vals.first, StackSlot = Vals.second;
  if (FIST.Val == 0) return 0;

  // Return an i64 load from the stack slot.
  SDOperand Res = DAG.getLoad(MVT::i64, FIST, StackSlot, NULL, 0);

  // Use a MERGE_VALUES node to drop the chain result value.
  return DAG.getNode(ISD::MERGE_VALUES, MVT::i64, Res).Val;
}

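/// LowerFABS - Lower fabs to an FAND with a constant-pool mask that clears
/// the sign bit of each element: ~(1 << 63) per f64, ~(1 << 31) per f32.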
SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType EltVT = VT;
  if (MVT::isVector(VT))
    EltVT = MVT::getVectorElementType(VT);
  const Type *OpNTy = MVT::getTypeForValueType(EltVT);
  std::vector<Constant*> CV;
  if (EltVT == MVT::f64) {
    Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, ~(1ULL << 63))));
    CV.push_back(C);
    CV.push_back(C);
  } else {
    Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, ~(1U << 31))));
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
  }
  Constant *C = ConstantVector::get(CV);
  SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx,
                               PseudoSourceValue::getConstantPool(), 0,
                               false, 16);
  return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask);
}

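/// LowerFNEG - Lower fneg to an XOR against a constant-pool sign-bit mask:
/// 1 << 63 per f64 element, 1 << 31 per f32 element.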
SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType EltVT = VT;
  unsigned EltNum = 1;
  if (MVT::isVector(VT)) {
    EltVT = MVT::getVectorElementType(VT);
    EltNum = MVT::getVectorNumElements(VT);
  }
  const Type *OpNTy = MVT::getTypeForValueType(EltVT);
  std::vector<Constant*> CV;
  if (EltVT == MVT::f64) {
    Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, 1ULL << 63)));
    CV.push_back(C);
    CV.push_back(C);
  } else {
    Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, 1U << 31)));
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
  }
  Constant *C = ConstantVector::get(CV);
  SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx,
                               PseudoSourceValue::getConstantPool(), 0,
                               false, 16);
  if (MVT::isVector(VT)) {
    return DAG.getNode(ISD::BIT_CONVERT, VT,
                       DAG.getNode(ISD::XOR, MVT::v2i64,
                                   DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64,
                                               Op.getOperand(0)),
                                   DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64,
                                               Mask)));
  } else {
    return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask);
  }
}

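/// LowerFCOPYSIGN - Lower copysign(x, y) as (x & ~signmask) | (y & signmask),
/// extending or rounding y first when the operand sizes differ.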
SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Op0 = Op.getOperand(0);
  SDOperand Op1 = Op.getOperand(1);
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType SrcVT = Op1.getValueType();
  const Type *SrcTy = MVT::getTypeForValueType(SrcVT);

  // If second operand is smaller, extend it first.
  if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) {
    Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1);
    SrcVT = VT;
    SrcTy = MVT::getTypeForValueType(SrcVT);
  }
  // And if it is bigger, shrink it first.
  if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) {
    Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1, DAG.getIntPtrConstant(1));
    SrcVT = VT;
    SrcTy = MVT::getTypeForValueType(SrcVT);
  }

  // At this point the operands and the result should have the same
  // type, and that won't be f80 since that is not custom lowered.

  // First get the sign bit of second operand.
  std::vector<Constant*> CV;
  if (SrcVT == MVT::f64) {
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 1ULL << 63))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0))));
  } else {
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 1U << 31))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
  }
  Constant *C = ConstantVector::get(CV);
  SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDOperand Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx,
                                PseudoSourceValue::getConstantPool(), 0,
                                false, 16);
  SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1);

  // Shift sign bit right or left if the two operands have different types.
  if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) {
    // Op0 is MVT::f32, Op1 is MVT::f64.
    SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit);
    SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit,
                          DAG.getConstant(32, MVT::i32));
    SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit);
    SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit,
                          DAG.getIntPtrConstant(0));
  }

  // Clear first operand sign bit.
  CV.clear();
  if (VT == MVT::f64) {
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, ~(1ULL << 63)))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0))));
  } else {
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, ~(1U << 31)))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
  }
  C = ConstantVector::get(CV);
  CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDOperand Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx,
                                PseudoSourceValue::getConstantPool(), 0,
                                false, 16);
  SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2);

  // Or the value with the sign bit.
  return DAG.getNode(X86ISD::FOR, VT, Val, SignBit);
}

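/// LowerSETCC - Lower an i8 setcc to X86ISD::CMP feeding X86ISD::SETCC. The
/// SETOEQ and SETUNE FP predicates need two SETCC nodes combined with AND/OR
/// because each depends on both ZF and PF.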
SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
  SDOperand Cond;
  SDOperand Op0 = Op.getOperand(0);
  SDOperand Op1 = Op.getOperand(1);
  SDOperand CC = Op.getOperand(2);
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
  unsigned X86CC;

  if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC,
                     Op0, Op1, DAG)) {
    Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1);
    return DAG.getNode(X86ISD::SETCC, MVT::i8,
                       DAG.getConstant(X86CC, MVT::i8), Cond);
  }

  assert(isFP && "Illegal integer SetCC!");

  Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1);
  switch (SetCCOpcode) {
  default: assert(false && "Illegal floating point SetCC!");
  case ISD::SETOEQ: {  // !PF & ZF
    SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                 DAG.getConstant(X86::COND_NP, MVT::i8), Cond);
    SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                 DAG.getConstant(X86::COND_E, MVT::i8), Cond);
    return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
  }
  case ISD::SETUNE: {  // PF | !ZF
    SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                 DAG.getConstant(X86::COND_P, MVT::i8), Cond);
    SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                 DAG.getConstant(X86::COND_NE, MVT::i8), Cond);
    return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
  }
  }
}

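/// LowerSELECT - Lower select to X86ISD::CMOV, reusing the flags of an
/// existing CMP/COMI/UCOMI where possible and otherwise materializing a
/// test of the condition against zero.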
SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) {
  bool addTest = true;
  SDOperand Cond = Op.getOperand(0);
  SDOperand CC;

  if (Cond.getOpcode() == ISD::SETCC)
    Cond = LowerSETCC(Cond, DAG);

  // If condition flag is set by a X86ISD::CMP, then use it as the condition
  // setting operand in place of the X86ISD::SETCC.
  if (Cond.getOpcode() == X86ISD::SETCC) {
    CC = Cond.getOperand(0);

    SDOperand Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    MVT::ValueType VT = Op.getValueType();

    bool IllegalFPCMov = false;
    if (MVT::isFloatingPoint(VT) && !MVT::isVector(VT) &&
        !isScalarFPTypeInSSEReg(VT))  // FPStack?
      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());

    if ((Opc == X86ISD::CMP ||
         Opc == X86ISD::COMI ||
         Opc == X86ISD::UCOMI) && !IllegalFPCMov) {
      Cond = Cmp;
      addTest = false;
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Cond,
                       DAG.getConstant(0, MVT::i8));
  }

  const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(),
                                                    MVT::Flag);
  SmallVector<SDOperand, 4> Ops;
  // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
  // condition is true.
  Ops.push_back(Op.getOperand(2));
  Ops.push_back(Op.getOperand(1));
  Ops.push_back(CC);
  Ops.push_back(Cond);
  return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
}

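/// LowerBRCOND - Lower brcond to X86ISD::BRCOND, reusing existing compare
/// flags in the same way as LowerSELECT above.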
SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) {
  bool addTest = true;
  SDOperand Chain = Op.getOperand(0);
  SDOperand Cond = Op.getOperand(1);
  SDOperand Dest = Op.getOperand(2);
  SDOperand CC;

  if (Cond.getOpcode() == ISD::SETCC)
    Cond = LowerSETCC(Cond, DAG);

  // If condition flag is set by a X86ISD::CMP, then use it as the condition
  // setting operand in place of the X86ISD::SETCC.
  if (Cond.getOpcode() == X86ISD::SETCC) {
    CC = Cond.getOperand(0);

    SDOperand Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    if (Opc == X86ISD::CMP ||
        Opc == X86ISD::COMI ||
        Opc == X86ISD::UCOMI) {
      Cond = Cmp;
      addTest = false;
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Cond,
                       DAG.getConstant(0, MVT::i8));
  }
  return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
                     Chain, Op.getOperand(2), CC, Cond);
}

// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
// Calls to _alloca are needed to probe the stack when allocating more than 4k
// bytes in one go. Touching the stack at 4K increments is necessary to ensure
// that the guard pages used by the OS virtual memory manager are allocated in
// correct sequence.
SDOperand
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
                                           SelectionDAG &DAG) {
  assert(Subtarget->isTargetCygMing() &&
         "This should be used only on Cygwin/Mingw targets");

  // Get the inputs.
  SDOperand Chain = Op.getOperand(0);
  SDOperand Size = Op.getOperand(1);
  // FIXME: Ensure alignment here

  SDOperand Flag;

  MVT::ValueType IntPtr = getPointerTy();
  MVT::ValueType SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;

  Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag);
  Flag = Chain.getValue(1);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SDOperand Ops[] = { Chain,
                      DAG.getTargetExternalSymbol("_alloca", IntPtr),
                      DAG.getRegister(X86::EAX, IntPtr),
                      Flag };
  Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4);
  Flag = Chain.getValue(1);

  Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1);

  std::vector<MVT::ValueType> Tys;
  Tys.push_back(SPTy);
  Tys.push_back(MVT::Other);
  SDOperand Ops1[2] = { Chain.getValue(0), Chain };
  return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2);
}

SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) {
  SDOperand InFlag(0, 0);
  SDOperand Chain = Op.getOperand(0);
  unsigned Align =
    (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
  if (Align == 0) Align = 1;

  ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
  // If not DWORD aligned or size is more than the threshold, call memset.
  // The libc version is likely to be faster for these cases. It can use the
  // address value and run time information about the CPU.
  if ((Align & 3) != 0 ||
      (I && I->getValue() > Subtarget->getMaxInlineSizeThreshold())) {
    MVT::ValueType IntPtr = getPointerTy();
    const Type *IntPtrTy = getTargetData()->getIntPtrType();
    TargetLowering::ArgListTy Args;
    TargetLowering::ArgListEntry Entry;
    Entry.Node = Op.getOperand(1);
    Entry.Ty = IntPtrTy;
    Args.push_back(Entry);
    // Extend the unsigned i8 argument to be an int value for the call.
    Entry.Node = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2));
    Entry.Ty = IntPtrTy;
    Args.push_back(Entry);
    Entry.Node = Op.getOperand(3);
    Args.push_back(Entry);
    std::pair<SDOperand,SDOperand> CallResult =
      LowerCallTo(Chain, Type::VoidTy, false, false, false, CallingConv::C,
                  false, DAG.getExternalSymbol("memset", IntPtr), Args, DAG);
    return CallResult.second;
  }

  MVT::ValueType AVT;
  SDOperand Count;
  ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2));
  unsigned BytesLeft = 0;
  bool TwoRepStos = false;
  if (ValC) {
    unsigned ValReg;
    uint64_t Val = ValC->getValue() & 255;

    // If the value is a constant, then we can potentially use larger sets.
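    // The byte value is replicated to fill the wider store unit, e.g. 0xAB
    // becomes 0xABAB for WORD stores and 0xABABABAB for DWORD stores.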
    switch (Align & 3) {
    case 2:   // WORD aligned
      AVT = MVT::i16;
      ValReg = X86::AX;
      Val = (Val << 8) | Val;
      break;
    case 0:   // DWORD aligned
      AVT = MVT::i32;
      ValReg = X86::EAX;
      Val = (Val << 8) | Val;
      Val = (Val << 16) | Val;
      if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) {  // QWORD aligned
        AVT = MVT::i64;
        ValReg = X86::RAX;
        Val = (Val << 32) | Val;
      }
      break;
    default:  // Byte aligned
      AVT = MVT::i8;
      ValReg = X86::AL;
      Count = Op.getOperand(3);
      break;
    }

    if (AVT > MVT::i8) {
      if (I) {
        unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
        Count = DAG.getIntPtrConstant(I->getValue() / UBytes);
        BytesLeft = I->getValue() % UBytes;
      } else {
        assert(AVT >= MVT::i32 &&
               "Do not use rep;stos if not at least DWORD aligned");
        Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
                            Op.getOperand(3), DAG.getConstant(2, MVT::i8));
        TwoRepStos = true;
      }
    }

    Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
                             InFlag);
    InFlag = Chain.getValue(1);
  } else {
    AVT = MVT::i8;
    Count = Op.getOperand(3);
    Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag);
    InFlag = Chain.getValue(1);
  }

  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
                           Count, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
                           Op.getOperand(1), InFlag);
  InFlag = Chain.getValue(1);

  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(DAG.getValueType(AVT));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());

  if (TwoRepStos) {
    InFlag = Chain.getValue(1);
    Count = Op.getOperand(3);
    MVT::ValueType CVT = Count.getValueType();
    SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
                                 DAG.getConstant((AVT == MVT::i64) ? 7 : 3,
                                                 CVT));
    Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX,
                             Left, InFlag);
    InFlag = Chain.getValue(1);
    Tys = DAG.getVTList(MVT::Other, MVT::Flag);
    Ops.clear();
    Ops.push_back(Chain);
    Ops.push_back(DAG.getValueType(MVT::i8));
    Ops.push_back(InFlag);
    Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());
  } else if (BytesLeft) {
    // Issue stores for the last 1 - 7 bytes.
    SDOperand Value;
    unsigned Val = ValC->getValue() & 255;
    unsigned Offset = I->getValue() - BytesLeft;
    SDOperand DstAddr = Op.getOperand(1);
    MVT::ValueType AddrVT = DstAddr.getValueType();
    if (BytesLeft >= 4) {
      Val = (Val << 8) | Val;
      Val = (Val << 16) | Val;
      Value = DAG.getConstant(Val, MVT::i32);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                       DAG.getConstant(Offset, AddrVT)),
                           NULL, 0);
      BytesLeft -= 4;
      Offset += 4;
    }
    if (BytesLeft >= 2) {
      Value = DAG.getConstant((Val << 8) | Val, MVT::i16);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                       DAG.getConstant(Offset, AddrVT)),
                           NULL, 0);
      BytesLeft -= 2;
      Offset += 2;
    }
    if (BytesLeft == 1) {
      Value = DAG.getConstant(Val, MVT::i8);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                       DAG.getConstant(Offset, AddrVT)),
                           NULL, 0);
    }
  }

  return Chain;
}

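/// LowerMEMCPYInline - Emit an inline memcpy of a known size: one rep;movs
/// of the widest unit the alignment allows, then explicit loads and stores
/// for the remaining 1 - 7 bytes.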
SDOperand X86TargetLowering::LowerMEMCPYInline(SDOperand Chain,
                                               SDOperand Dest,
                                               SDOperand Source,
                                               unsigned Size,
                                               unsigned Align,
                                               SelectionDAG &DAG) {
  MVT::ValueType AVT;
  unsigned BytesLeft = 0;
  switch (Align & 3) {
  case 2:   // WORD aligned
    AVT = MVT::i16;
    break;
  case 0:   // DWORD aligned
    AVT = MVT::i32;
    if (Subtarget->is64Bit() && ((Align & 0xF) == 0))  // QWORD aligned
      AVT = MVT::i64;
    break;
  default:  // Byte aligned
    AVT = MVT::i8;
    break;
  }

  unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
  SDOperand Count = DAG.getIntPtrConstant(Size / UBytes);
  BytesLeft = Size % UBytes;

  SDOperand InFlag(0, 0);
  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
                           Count, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
                           Dest, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI,
                           Source, InFlag);
  InFlag = Chain.getValue(1);

  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(DAG.getValueType(AVT));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size());

  if (BytesLeft) {
    // Issue loads and stores for the last 1 - 7 bytes.
    unsigned Offset = Size - BytesLeft;
    SDOperand DstAddr = Dest;
    MVT::ValueType DstVT = DstAddr.getValueType();
    SDOperand SrcAddr = Source;
    MVT::ValueType SrcVT = SrcAddr.getValueType();
    SDOperand Value;
    if (BytesLeft >= 4) {
      Value = DAG.getLoad(MVT::i32, Chain,
                          DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
                                      DAG.getConstant(Offset, SrcVT)),
                          NULL, 0);
      Chain = Value.getValue(1);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, DstVT, DstAddr,
                                       DAG.getConstant(Offset, DstVT)),
                           NULL, 0);
      BytesLeft -= 4;
      Offset += 4;
    }
    if (BytesLeft >= 2) {
      Value = DAG.getLoad(MVT::i16, Chain,
                          DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
                                      DAG.getConstant(Offset, SrcVT)),
                          NULL, 0);
      Chain = Value.getValue(1);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, DstVT, DstAddr,
                                       DAG.getConstant(Offset, DstVT)),
                           NULL, 0);
      BytesLeft -= 2;
      Offset += 2;
    }

    if (BytesLeft == 1) {
      Value = DAG.getLoad(MVT::i8, Chain,
                          DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
                                      DAG.getConstant(Offset, SrcVT)),
                          NULL, 0);
      Chain = Value.getValue(1);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, DstVT, DstAddr,
                                       DAG.getConstant(Offset, DstVT)),
                           NULL, 0);
    }
  }

  return Chain;
}

/// Expand the result of: i64,outchain = READCYCLECOUNTER inchain
SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG) {
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
  SDOperand TheChain = N->getOperand(0);
  SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1);
  if (Subtarget->is64Bit()) {
    SDOperand rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1));
    SDOperand rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX,
                                       MVT::i64, rax.getValue(2));
    SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx,
                                DAG.getConstant(32, MVT::i8));
    SDOperand Ops[] = {
      DAG.getNode(ISD::OR, MVT::i64, rax, Tmp), rdx.getValue(1)
    };

    Tys = DAG.getVTList(MVT::i64, MVT::Other);
    return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val;
  }

  SDOperand eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1));
  SDOperand edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX,
                                     MVT::i32, eax.getValue(2));
  // Use a buildpair to merge the two 32-bit values into a 64-bit one.
  SDOperand Ops[] = { eax, edx };
  Ops[0] = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Ops, 2);

  // Use a MERGE_VALUES to return the value and chain.
  Ops[1] = edx.getValue(1);
  Tys = DAG.getVTList(MVT::i64, MVT::Other);
  return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val;
}

SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) {
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  if (!Subtarget->is64Bit()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
    return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0);
  }

  // __va_list_tag:
  //   gp_offset         (0 - 6 * 8)
  //   fp_offset         (48 - 48 + 8 * 16)
  //   overflow_arg_area (point to parameters coming in memory)
  //   reg_save_area
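  //
  // In C terms this is roughly the following struct (for illustration; the
  // layout comes from the x86-64 psABI):
  //   struct __va_list_tag {
  //     unsigned gp_offset;       // offset 0
  //     unsigned fp_offset;       // offset 4
  //     void *overflow_arg_area;  // offset 8
  //     void *reg_save_area;      // offset 16
  //   };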
  SmallVector<SDOperand, 8> MemOps;
  SDOperand FIN = Op.getOperand(1);
  // Store gp_offset
  SDOperand Store = DAG.getStore(Op.getOperand(0),
                                 DAG.getConstant(VarArgsGPOffset, MVT::i32),
                                 FIN, SV, 0);
  MemOps.push_back(Store);

  // Store fp_offset
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4));
  Store = DAG.getStore(Op.getOperand(0),
                       DAG.getConstant(VarArgsFPOffset, MVT::i32),
                       FIN, SV, 0);
  MemOps.push_back(Store);

  // Store ptr to overflow_arg_area
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4));
  SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
  Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV, 0);
  MemOps.push_back(Store);

  // Store ptr to reg_save_area.
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8));
  SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
  Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV, 0);
  MemOps.push_back(Store);
  return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size());
}

SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) {
  // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
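  // That is 24 bytes in all, copied below as three i64 load/store pairs.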
  SDOperand Chain = Op.getOperand(0);
  SDOperand DstPtr = Op.getOperand(1);
  SDOperand SrcPtr = Op.getOperand(2);
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  SrcPtr = DAG.getLoad(getPointerTy(), Chain, SrcPtr, SrcSV, 0);
  Chain = SrcPtr.getValue(1);
  for (unsigned i = 0; i < 3; ++i) {
    SDOperand Val = DAG.getLoad(MVT::i64, Chain, SrcPtr, SrcSV, 0);
    Chain = Val.getValue(1);
    Chain = DAG.getStore(Chain, Val, DstPtr, DstSV, 0);
    if (i == 2)
      break;
    SrcPtr = DAG.getNode(ISD::ADD, getPointerTy(), SrcPtr,
                         DAG.getIntPtrConstant(8));
    DstPtr = DAG.getNode(ISD::ADD, getPointerTy(), DstPtr,
                         DAG.getIntPtrConstant(8));
  }
  return Chain;
}
4858
4859SDOperand
4860X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
4861 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue();
4862 switch (IntNo) {
4863 default: return SDOperand(); // Don't custom lower most intrinsics.
4864 // Comparison intrinsics.
4865 case Intrinsic::x86_sse_comieq_ss:
4866 case Intrinsic::x86_sse_comilt_ss:
4867 case Intrinsic::x86_sse_comile_ss:
4868 case Intrinsic::x86_sse_comigt_ss:
4869 case Intrinsic::x86_sse_comige_ss:
4870 case Intrinsic::x86_sse_comineq_ss:
4871 case Intrinsic::x86_sse_ucomieq_ss:
4872 case Intrinsic::x86_sse_ucomilt_ss:
4873 case Intrinsic::x86_sse_ucomile_ss:
4874 case Intrinsic::x86_sse_ucomigt_ss:
4875 case Intrinsic::x86_sse_ucomige_ss:
4876 case Intrinsic::x86_sse_ucomineq_ss:
4877 case Intrinsic::x86_sse2_comieq_sd:
4878 case Intrinsic::x86_sse2_comilt_sd:
4879 case Intrinsic::x86_sse2_comile_sd:
4880 case Intrinsic::x86_sse2_comigt_sd:
4881 case Intrinsic::x86_sse2_comige_sd:
4882 case Intrinsic::x86_sse2_comineq_sd:
4883 case Intrinsic::x86_sse2_ucomieq_sd:
4884 case Intrinsic::x86_sse2_ucomilt_sd:
4885 case Intrinsic::x86_sse2_ucomile_sd:
4886 case Intrinsic::x86_sse2_ucomigt_sd:
4887 case Intrinsic::x86_sse2_ucomige_sd:
4888 case Intrinsic::x86_sse2_ucomineq_sd: {
4889 unsigned Opc = 0;
4890 ISD::CondCode CC = ISD::SETCC_INVALID;
4891 switch (IntNo) {
4892 default: break;
4893 case Intrinsic::x86_sse_comieq_ss:
4894 case Intrinsic::x86_sse2_comieq_sd:
4895 Opc = X86ISD::COMI;
4896 CC = ISD::SETEQ;
4897 break;
4898 case Intrinsic::x86_sse_comilt_ss:
4899 case Intrinsic::x86_sse2_comilt_sd:
4900 Opc = X86ISD::COMI;
4901 CC = ISD::SETLT;
4902 break;
4903 case Intrinsic::x86_sse_comile_ss:
4904 case Intrinsic::x86_sse2_comile_sd:
4905 Opc = X86ISD::COMI;
4906 CC = ISD::SETLE;
4907 break;
4908 case Intrinsic::x86_sse_comigt_ss:
4909 case Intrinsic::x86_sse2_comigt_sd:
4910 Opc = X86ISD::COMI;
4911 CC = ISD::SETGT;
4912 break;
4913 case Intrinsic::x86_sse_comige_ss:
4914 case Intrinsic::x86_sse2_comige_sd:
4915 Opc = X86ISD::COMI;
4916 CC = ISD::SETGE;
4917 break;
4918 case Intrinsic::x86_sse_comineq_ss:
4919 case Intrinsic::x86_sse2_comineq_sd:
4920 Opc = X86ISD::COMI;
4921 CC = ISD::SETNE;
4922 break;
4923 case Intrinsic::x86_sse_ucomieq_ss:
4924 case Intrinsic::x86_sse2_ucomieq_sd:
4925 Opc = X86ISD::UCOMI;
4926 CC = ISD::SETEQ;
4927 break;
4928 case Intrinsic::x86_sse_ucomilt_ss:
4929 case Intrinsic::x86_sse2_ucomilt_sd:
4930 Opc = X86ISD::UCOMI;
4931 CC = ISD::SETLT;
4932 break;
4933 case Intrinsic::x86_sse_ucomile_ss:
4934 case Intrinsic::x86_sse2_ucomile_sd:
4935 Opc = X86ISD::UCOMI;
4936 CC = ISD::SETLE;
4937 break;
4938 case Intrinsic::x86_sse_ucomigt_ss:
4939 case Intrinsic::x86_sse2_ucomigt_sd:
4940 Opc = X86ISD::UCOMI;
4941 CC = ISD::SETGT;
4942 break;
4943 case Intrinsic::x86_sse_ucomige_ss:
4944 case Intrinsic::x86_sse2_ucomige_sd:
4945 Opc = X86ISD::UCOMI;
4946 CC = ISD::SETGE;
4947 break;
4948 case Intrinsic::x86_sse_ucomineq_ss:
4949 case Intrinsic::x86_sse2_ucomineq_sd:
4950 Opc = X86ISD::UCOMI;
4951 CC = ISD::SETNE;
4952 break;
4953 }
4954
4955 unsigned X86CC;
4956 SDOperand LHS = Op.getOperand(1);
4957 SDOperand RHS = Op.getOperand(2);
4958 translateX86CC(CC, true, X86CC, LHS, RHS, DAG);
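    // translateX86CC maps the ISD condition code onto an X86 condition code;
    // LHS and RHS are taken by reference because the translation may need to
    // commute the comparison operands.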
4959
Evan Cheng621216e2007-09-29 00:00:36 +00004960 SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS);
4961 SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8,
4962 DAG.getConstant(X86CC, MVT::i8), Cond);
4963 return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004964 }
4965 }
4966}
4967
4968SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) {
4969 // Depths > 0 not supported yet!
4970 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
4971 return SDOperand();
4972
4973 // Just load the return address
4974 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
4975 return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0);
4976}
4977
4978SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) {
4979 // Depths > 0 not supported yet!
4980 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
4981 return SDOperand();
4982
4983 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
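  // On x86-32 the saved frame pointer sits 4 bytes below the return address
  // slot, so the frame address is the return-address frame index minus 4.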
4984 return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI,
Chris Lattner5872a362008-01-17 07:00:52 +00004985 DAG.getIntPtrConstant(4));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004986}
4987
4988SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op,
4989 SelectionDAG &DAG) {
4990 // Is not yet supported on x86-64
4991 if (Subtarget->is64Bit())
4992 return SDOperand();
4993
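  // On x86-32 the incoming argument area starts 8 bytes above the frame
  // pointer: 4 bytes of saved EBP plus 4 bytes of return address.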
Chris Lattner5872a362008-01-17 07:00:52 +00004994 return DAG.getIntPtrConstant(8);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00004995}
4996
4997SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG)
4998{
4999 assert(!Subtarget->is64Bit() &&
5000 "Lowering of eh_return builtin is not supported yet on x86-64");
5001
5002 MachineFunction &MF = DAG.getMachineFunction();
5003 SDOperand Chain = Op.getOperand(0);
5004 SDOperand Offset = Op.getOperand(1);
5005 SDOperand Handler = Op.getOperand(2);
5006
5007 SDOperand Frame = DAG.getRegister(RegInfo->getFrameRegister(MF),
5008 getPointerTy());
5009
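  // Frame - (-4), i.e. Frame + 4, is the return address slot; add the extra
  // stack adjustment Offset and store the handler address there. ECX then
  // carries this address into the EH_RETURN node built below.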
5010 SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame,
Chris Lattner5872a362008-01-17 07:00:52 +00005011 DAG.getIntPtrConstant(-4UL));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005012 StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset);
5013 Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0);
5014 Chain = DAG.getCopyToReg(Chain, X86::ECX, StoreAddr);
Chris Lattner1b989192007-12-31 04:13:23 +00005015 MF.getRegInfo().addLiveOut(X86::ECX);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005016
5017 return DAG.getNode(X86ISD::EH_RETURN, MVT::Other,
5018 Chain, DAG.getRegister(X86::ECX, getPointerTy()));
5019}
5020
Duncan Sandsd8455ca2007-07-27 20:02:49 +00005021SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op,
5022 SelectionDAG &DAG) {
5023 SDOperand Root = Op.getOperand(0);
5024 SDOperand Trmp = Op.getOperand(1); // trampoline
5025 SDOperand FPtr = Op.getOperand(2); // nested function
5026 SDOperand Nest = Op.getOperand(3); // 'nest' parameter value
5027
Dan Gohman12a9c082008-02-06 22:27:42 +00005028 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
Duncan Sandsd8455ca2007-07-27 20:02:49 +00005029
Duncan Sands3e8ff6f2008-01-16 22:55:25 +00005030 const X86InstrInfo *TII =
5031 ((X86TargetMachine&)getTargetMachine()).getInstrInfo();
5032
Duncan Sandsd8455ca2007-07-27 20:02:49 +00005033 if (Subtarget->is64Bit()) {
Duncan Sands3e8ff6f2008-01-16 22:55:25 +00005034 SDOperand OutChains[6];
5035
5036 // Large code-model.
5037
5038 const unsigned char JMP64r = TII->getBaseOpcodeFor(X86::JMP64r);
5039 const unsigned char MOV64ri = TII->getBaseOpcodeFor(X86::MOV64ri);
5040
5041 const unsigned char N86R10 =
Dan Gohman06844672008-02-08 03:29:40 +00005042 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R10);
Duncan Sands3e8ff6f2008-01-16 22:55:25 +00005043 const unsigned char N86R11 =
Dan Gohman06844672008-02-08 03:29:40 +00005044 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R11);
Duncan Sands3e8ff6f2008-01-16 22:55:25 +00005045
5046 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
5047
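    // The resulting 64-bit trampoline is 23 bytes:
    //   offset  0: movabsq $<nested function>, %r11  (REX.WB + opcode, imm64)
    //   offset 10: movabsq $<nest value>, %r10       (REX.WB + opcode, imm64)
    //   offset 20: jmpq *%r11                        (REX.WB + opcode + ModRM)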
5048 // Load the pointer to the nested function into R11.
5049 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
5050 SDOperand Addr = Trmp;
5051 OutChains[0] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr,
Dan Gohman12a9c082008-02-06 22:27:42 +00005052 TrmpAddr, 0);
Duncan Sands3e8ff6f2008-01-16 22:55:25 +00005053
5054 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(2, MVT::i64));
Dan Gohman12a9c082008-02-06 22:27:42 +00005055 OutChains[1] = DAG.getStore(Root, FPtr, Addr, TrmpAddr, 2, false, 2);
Duncan Sands3e8ff6f2008-01-16 22:55:25 +00005056
5057 // Load the 'nest' parameter value into R10.
5058 // R10 is specified in X86CallingConv.td
5059 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
5060 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(10, MVT::i64));
5061 OutChains[2] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr,
Dan Gohman12a9c082008-02-06 22:27:42 +00005062 TrmpAddr, 10);
Duncan Sands3e8ff6f2008-01-16 22:55:25 +00005063
5064 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(12, MVT::i64));
Dan Gohman12a9c082008-02-06 22:27:42 +00005065 OutChains[3] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 12, false, 2);
Duncan Sands3e8ff6f2008-01-16 22:55:25 +00005066
5067 // Jump to the nested function.
5068 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
5069 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(20, MVT::i64));
5070 OutChains[4] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr,
Dan Gohman12a9c082008-02-06 22:27:42 +00005071 TrmpAddr, 20);
Duncan Sands3e8ff6f2008-01-16 22:55:25 +00005072
5073 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
5074 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(22, MVT::i64));
5075 OutChains[5] = DAG.getStore(Root, DAG.getConstant(ModRM, MVT::i8), Addr,
Dan Gohman12a9c082008-02-06 22:27:42 +00005076 TrmpAddr, 22);
Duncan Sands3e8ff6f2008-01-16 22:55:25 +00005077
5078 SDOperand Ops[] =
5079 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 6) };
5080 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2);
Duncan Sandsd8455ca2007-07-27 20:02:49 +00005081 } else {
Dan Gohman0bd70702008-01-31 01:01:48 +00005082 const Function *Func =
Duncan Sandsd8455ca2007-07-27 20:02:49 +00005083 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
5084 unsigned CC = Func->getCallingConv();
Duncan Sands466eadd2007-08-29 19:01:20 +00005085 unsigned NestReg;
Duncan Sandsd8455ca2007-07-27 20:02:49 +00005086
5087 switch (CC) {
5088 default:
5089 assert(0 && "Unsupported calling convention");
5090 case CallingConv::C:
Duncan Sandsd8455ca2007-07-27 20:02:49 +00005091 case CallingConv::X86_StdCall: {
5092 // Pass 'nest' parameter in ECX.
5093 // Must be kept in sync with X86CallingConv.td
Duncan Sands466eadd2007-08-29 19:01:20 +00005094 NestReg = X86::ECX;
Duncan Sandsd8455ca2007-07-27 20:02:49 +00005095
5096 // Check that ECX wasn't needed by an 'inreg' parameter.
5097 const FunctionType *FTy = Func->getFunctionType();
Duncan Sandsf5588dc2007-11-27 13:23:08 +00005098 const ParamAttrsList *Attrs = Func->getParamAttrs();
Duncan Sandsd8455ca2007-07-27 20:02:49 +00005099
5100 if (Attrs && !Func->isVarArg()) {
5101 unsigned InRegCount = 0;
5102 unsigned Idx = 1;
5103
5104 for (FunctionType::param_iterator I = FTy->param_begin(),
5105 E = FTy->param_end(); I != E; ++I, ++Idx)
5106 if (Attrs->paramHasAttr(Idx, ParamAttr::InReg))
5107 // FIXME: should only count parameters that are lowered to integers.
5108 InRegCount += (getTargetData()->getTypeSizeInBits(*I) + 31) / 32;
5109
5110 if (InRegCount > 2) {
5111 cerr << "Nest register in use - reduce number of inreg parameters!\n";
5112 abort();
5113 }
5114 }
5115 break;
5116 }
5117 case CallingConv::X86_FastCall:
5118 // Pass 'nest' parameter in EAX.
5119 // Must be kept in sync with X86CallingConv.td
Duncan Sands466eadd2007-08-29 19:01:20 +00005120 NestReg = X86::EAX;
Duncan Sandsd8455ca2007-07-27 20:02:49 +00005121 break;
5122 }
5123
5124 SDOperand OutChains[4];
5125 SDOperand Addr, Disp;
5126
5127 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32));
5128 Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr);
5129
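    // The 32-bit trampoline is 10 bytes: a mov-imm32 of the nest value into
    // the nest register at offset 0, then a jmp rel32 at offset 5. Disp is
    // that rel32, measured from the end of the trampoline (Trmp + 10).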
Duncan Sands3e8ff6f2008-01-16 22:55:25 +00005130 const unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
5131 const unsigned char N86Reg =
Dan Gohman06844672008-02-08 03:29:40 +00005132 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg);
Duncan Sands466eadd2007-08-29 19:01:20 +00005133 OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
Dan Gohman12a9c082008-02-06 22:27:42 +00005134 Trmp, TrmpAddr, 0);
Duncan Sandsd8455ca2007-07-27 20:02:49 +00005135
5136 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32));
Dan Gohman12a9c082008-02-06 22:27:42 +00005137 OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 1, false, 1);
Duncan Sandsd8455ca2007-07-27 20:02:49 +00005138
Duncan Sands3e8ff6f2008-01-16 22:55:25 +00005139 const unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP);
Duncan Sandsd8455ca2007-07-27 20:02:49 +00005140 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32));
5141 OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr,
Dan Gohman12a9c082008-02-06 22:27:42 +00005142 TrmpAddr, 5, false, 1);
Duncan Sandsd8455ca2007-07-27 20:02:49 +00005143
5144 Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32));
Dan Gohman12a9c082008-02-06 22:27:42 +00005145 OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpAddr, 6, false, 1);
Duncan Sandsd8455ca2007-07-27 20:02:49 +00005146
Duncan Sands7407a9f2007-09-11 14:10:23 +00005147 SDOperand Ops[] =
5148 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) };
5149 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2);
Duncan Sandsd8455ca2007-07-27 20:02:49 +00005150 }
5151}
5152
Dan Gohman819574c2008-01-31 00:41:03 +00005153SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) {
Anton Korobeynikovfbe230e2007-11-16 01:31:51 +00005154 /*
 5155     The rounding mode is in bits 11:10 of the x87 FP control word (the
 5156     word stored by FNSTCW), and has the following settings:
5157 00 Round to nearest
5158 01 Round to -inf
5159 10 Round to +inf
5160 11 Round to 0
5161
5162 FLT_ROUNDS, on the other hand, expects the following:
5163 -1 Undefined
5164 0 Round to 0
5165 1 Round to nearest
5166 2 Round to +inf
5167 3 Round to -inf
5168
5169 To perform the conversion, we do:
5170 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
5171 */
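  // Example: RC = 01 (round to -inf) gives ((0 >> 11) | (0x400 >> 9)) = 2,
  // and (2 + 1) & 3 = 3, which is FLT_ROUNDS' encoding of round to -inf.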
5172
5173 MachineFunction &MF = DAG.getMachineFunction();
5174 const TargetMachine &TM = MF.getTarget();
5175 const TargetFrameInfo &TFI = *TM.getFrameInfo();
5176 unsigned StackAlignment = TFI.getStackAlignment();
5177 MVT::ValueType VT = Op.getValueType();
5178
5179 // Save FP Control Word to stack slot
5180 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment);
5181 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
5182
5183 SDOperand Chain = DAG.getNode(X86ISD::FNSTCW16m, MVT::Other,
5184 DAG.getEntryNode(), StackSlot);
5185
5186 // Load FP Control Word from stack slot
5187 SDOperand CWD = DAG.getLoad(MVT::i16, Chain, StackSlot, NULL, 0);
5188
5189 // Transform as necessary
5190 SDOperand CWD1 =
5191 DAG.getNode(ISD::SRL, MVT::i16,
5192 DAG.getNode(ISD::AND, MVT::i16,
5193 CWD, DAG.getConstant(0x800, MVT::i16)),
5194 DAG.getConstant(11, MVT::i8));
5195 SDOperand CWD2 =
5196 DAG.getNode(ISD::SRL, MVT::i16,
5197 DAG.getNode(ISD::AND, MVT::i16,
5198 CWD, DAG.getConstant(0x400, MVT::i16)),
5199 DAG.getConstant(9, MVT::i8));
5200
5201 SDOperand RetVal =
5202 DAG.getNode(ISD::AND, MVT::i16,
5203 DAG.getNode(ISD::ADD, MVT::i16,
5204 DAG.getNode(ISD::OR, MVT::i16, CWD1, CWD2),
5205 DAG.getConstant(1, MVT::i16)),
5206 DAG.getConstant(3, MVT::i16));
5207
5208
5209 return DAG.getNode((MVT::getSizeInBits(VT) < 16 ?
5210 ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal);
5211}
5212
Evan Cheng48679f42007-12-14 02:13:44 +00005213SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) {
5214 MVT::ValueType VT = Op.getValueType();
5215 MVT::ValueType OpVT = VT;
5216 unsigned NumBits = MVT::getSizeInBits(VT);
5217
5218 Op = Op.getOperand(0);
5219 if (VT == MVT::i8) {
Evan Cheng7cfbfe32007-12-14 08:30:15 +00005220 // Zero extend to i32 since there is not an i8 bsr.
Evan Cheng48679f42007-12-14 02:13:44 +00005221 OpVT = MVT::i32;
5222 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op);
5223 }
Evan Cheng48679f42007-12-14 02:13:44 +00005224
Evan Cheng7cfbfe32007-12-14 08:30:15 +00005225 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
5226 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
5227 Op = DAG.getNode(X86ISD::BSR, VTs, Op);
5228
5229 // If src is zero (i.e. bsr sets ZF), returns NumBits.
5230 SmallVector<SDOperand, 4> Ops;
5231 Ops.push_back(Op);
5232 Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT));
5233 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8));
5234 Ops.push_back(Op.getValue(1));
5235 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4);
5236
5237 // Finally xor with NumBits-1.
5238 Op = DAG.getNode(ISD::XOR, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
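  // bsr yields the index of the highest set bit, i.e. NumBits-1-ctlz, and the
  // index always fits inside the mask NumBits-1, so the xor recovers ctlz.
  // For a zero source the cmov above selected 2*NumBits-1, which xors to
  // NumBits.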
5239
Evan Cheng48679f42007-12-14 02:13:44 +00005240 if (VT == MVT::i8)
5241 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op);
5242 return Op;
5243}
5244
5245SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) {
5246 MVT::ValueType VT = Op.getValueType();
5247 MVT::ValueType OpVT = VT;
Evan Cheng7cfbfe32007-12-14 08:30:15 +00005248 unsigned NumBits = MVT::getSizeInBits(VT);
Evan Cheng48679f42007-12-14 02:13:44 +00005249
5250 Op = Op.getOperand(0);
5251 if (VT == MVT::i8) {
5252 OpVT = MVT::i32;
5253 Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op);
5254 }
Evan Cheng7cfbfe32007-12-14 08:30:15 +00005255
5256 // Issue a bsf (scan bits forward) which also sets EFLAGS.
5257 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
5258 Op = DAG.getNode(X86ISD::BSF, VTs, Op);
5259
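  // Unlike bsr, bsf already produces the trailing-zero count for a nonzero
  // source, so only the zero-input case handled below needs fixing up.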
5260 // If src is zero (i.e. bsf sets ZF), returns NumBits.
5261 SmallVector<SDOperand, 4> Ops;
5262 Ops.push_back(Op);
5263 Ops.push_back(DAG.getConstant(NumBits, OpVT));
5264 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8));
5265 Ops.push_back(Op.getValue(1));
5266 Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4);
5267
Evan Cheng48679f42007-12-14 02:13:44 +00005268 if (VT == MVT::i8)
5269 Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op);
5270 return Op;
5271}
5272
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005273/// LowerOperation - Provide custom lowering hooks for some operations.
5274///
5275SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
5276 switch (Op.getOpcode()) {
5277 default: assert(0 && "Should not custom lower this!");
5278 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
5279 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
5280 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
5281 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
5282 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
5283 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
5284 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
5285 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
5286 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
5287 case ISD::SHL_PARTS:
5288 case ISD::SRA_PARTS:
5289 case ISD::SRL_PARTS: return LowerShift(Op, DAG);
5290 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
5291 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
5292 case ISD::FABS: return LowerFABS(Op, DAG);
5293 case ISD::FNEG: return LowerFNEG(Op, DAG);
5294 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
Evan Cheng621216e2007-09-29 00:00:36 +00005295 case ISD::SETCC: return LowerSETCC(Op, DAG);
5296 case ISD::SELECT: return LowerSELECT(Op, DAG);
5297 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005298 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
5299 case ISD::CALL: return LowerCALL(Op, DAG);
5300 case ISD::RET: return LowerRET(Op, DAG);
5301 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
5302 case ISD::MEMSET: return LowerMEMSET(Op, DAG);
5303 case ISD::MEMCPY: return LowerMEMCPY(Op, DAG);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005304 case ISD::VASTART: return LowerVASTART(Op, DAG);
5305 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
5306 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
5307 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
5308 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
5309 case ISD::FRAME_TO_ARGS_OFFSET:
5310 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
5311 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
5312 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
Duncan Sandsd8455ca2007-07-27 20:02:49 +00005313 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG);
Dan Gohman819574c2008-01-31 00:41:03 +00005314 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
Evan Cheng48679f42007-12-14 02:13:44 +00005315 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
5316 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
Chris Lattnerdfb947d2007-11-24 07:07:01 +00005317
5318 // FIXME: REMOVE THIS WHEN LegalizeDAGTypes lands.
5319 case ISD::READCYCLECOUNTER:
5320 return SDOperand(ExpandREADCYCLECOUNTER(Op.Val, DAG), 0);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005321 }
Chris Lattnerdfb947d2007-11-24 07:07:01 +00005322}
5323
 5324/// ExpandOperationResult - Provide custom lowering hooks for expanding operations.
5325SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
5326 switch (N->getOpcode()) {
5327 default: assert(0 && "Should not custom lower this!");
5328 case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG);
5329 case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG);
5330 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005331}
5332
5333const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
5334 switch (Opcode) {
5335 default: return NULL;
Evan Cheng48679f42007-12-14 02:13:44 +00005336 case X86ISD::BSF: return "X86ISD::BSF";
5337 case X86ISD::BSR: return "X86ISD::BSR";
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005338 case X86ISD::SHLD: return "X86ISD::SHLD";
5339 case X86ISD::SHRD: return "X86ISD::SHRD";
5340 case X86ISD::FAND: return "X86ISD::FAND";
5341 case X86ISD::FOR: return "X86ISD::FOR";
5342 case X86ISD::FXOR: return "X86ISD::FXOR";
5343 case X86ISD::FSRL: return "X86ISD::FSRL";
5344 case X86ISD::FILD: return "X86ISD::FILD";
5345 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
5346 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
5347 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
5348 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
5349 case X86ISD::FLD: return "X86ISD::FLD";
5350 case X86ISD::FST: return "X86ISD::FST";
5351 case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT";
Evan Cheng931a8f42008-01-29 19:34:22 +00005352 case X86ISD::FP_GET_RESULT2: return "X86ISD::FP_GET_RESULT2";
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005353 case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT";
5354 case X86ISD::CALL: return "X86ISD::CALL";
5355 case X86ISD::TAILCALL: return "X86ISD::TAILCALL";
5356 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
5357 case X86ISD::CMP: return "X86ISD::CMP";
5358 case X86ISD::COMI: return "X86ISD::COMI";
5359 case X86ISD::UCOMI: return "X86ISD::UCOMI";
5360 case X86ISD::SETCC: return "X86ISD::SETCC";
5361 case X86ISD::CMOV: return "X86ISD::CMOV";
5362 case X86ISD::BRCOND: return "X86ISD::BRCOND";
5363 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
5364 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
5365 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005366 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
5367 case X86ISD::Wrapper: return "X86ISD::Wrapper";
Nate Begemand77e59e2008-02-11 04:19:36 +00005368 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005369 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
Nate Begemand77e59e2008-02-11 04:19:36 +00005370 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
5371 case X86ISD::PINSRB: return "X86ISD::PINSRB";
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005372 case X86ISD::PINSRW: return "X86ISD::PINSRW";
5373 case X86ISD::FMAX: return "X86ISD::FMAX";
5374 case X86ISD::FMIN: return "X86ISD::FMIN";
5375 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
5376 case X86ISD::FRCP: return "X86ISD::FRCP";
5377 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
5378 case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER";
5379 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00005380 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
Anton Korobeynikovfbe230e2007-11-16 01:31:51 +00005381 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005382 }
5383}
5384
5385// isLegalAddressingMode - Return true if the addressing mode represented
5386// by AM is legal for this target, for a load/store of the specified type.
5387bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
5388 const Type *Ty) const {
5389 // X86 supports extremely general addressing modes.
5390
5391 // X86 allows a sign-extended 32-bit immediate field as a displacement.
5392 if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1)
5393 return false;
5394
5395 if (AM.BaseGV) {
Evan Cheng6a1f3f12007-08-01 23:46:47 +00005396 // We can only fold this if we don't need an extra load.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005397 if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false))
5398 return false;
Evan Cheng6a1f3f12007-08-01 23:46:47 +00005399
5400 // X86-64 only supports addr of globals in small code model.
5401 if (Subtarget->is64Bit()) {
5402 if (getTargetMachine().getCodeModel() != CodeModel::Small)
5403 return false;
5404 // If lower 4G is not available, then we must use rip-relative addressing.
5405 if (AM.BaseOffs || AM.Scale > 1)
5406 return false;
5407 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005408 }
5409
5410 switch (AM.Scale) {
5411 case 0:
5412 case 1:
5413 case 2:
5414 case 4:
5415 case 8:
5416 // These scales always work.
5417 break;
5418 case 3:
5419 case 5:
5420 case 9:
5421 // These scales are formed with basereg+scalereg. Only accept if there is
5422 // no basereg yet.
5423 if (AM.HasBaseReg)
5424 return false;
5425 break;
5426 default: // Other stuff never works.
5427 return false;
5428 }
5429
5430 return true;
5431}
5432
5433
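/// isTruncateFree - Return true if it is free to truncate a value of type
/// Ty1 to the narrower type Ty2. On x86 an integer truncate is just a use of
/// the low subregister, but a 64-bit wide source is only free to truncate on
/// x86-64.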
Evan Cheng27a820a2007-10-26 01:56:11 +00005434bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const {
5435 if (!Ty1->isInteger() || !Ty2->isInteger())
5436 return false;
Evan Cheng7f152602007-10-29 07:57:50 +00005437 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
5438 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
5439 if (NumBits1 <= NumBits2)
5440 return false;
5441 return Subtarget->is64Bit() || NumBits1 < 64;
Evan Cheng27a820a2007-10-26 01:56:11 +00005442}
5443
Evan Cheng9decb332007-10-29 19:58:20 +00005444bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1,
5445 MVT::ValueType VT2) const {
5446 if (!MVT::isInteger(VT1) || !MVT::isInteger(VT2))
5447 return false;
5448 unsigned NumBits1 = MVT::getSizeInBits(VT1);
5449 unsigned NumBits2 = MVT::getSizeInBits(VT2);
5450 if (NumBits1 <= NumBits2)
5451 return false;
5452 return Subtarget->is64Bit() || NumBits1 < 64;
5453}
Evan Cheng27a820a2007-10-26 01:56:11 +00005454
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005455/// isShuffleMaskLegal - Targets can use this to indicate that they only
5456/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
5457/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
5458/// are assumed to be legal.
5459bool
5460X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
5461 // Only do shuffles on 128-bit vector types for now.
5462 if (MVT::getSizeInBits(VT) == 64) return false;
5463 return (Mask.Val->getNumOperands() <= 4 ||
5464 isIdentityMask(Mask.Val) ||
5465 isIdentityMask(Mask.Val, true) ||
5466 isSplatMask(Mask.Val) ||
5467 isPSHUFHW_PSHUFLWMask(Mask.Val) ||
5468 X86::isUNPCKLMask(Mask.Val) ||
5469 X86::isUNPCKHMask(Mask.Val) ||
5470 X86::isUNPCKL_v_undef_Mask(Mask.Val) ||
5471 X86::isUNPCKH_v_undef_Mask(Mask.Val));
5472}
5473
5474bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps,
5475 MVT::ValueType EVT,
5476 SelectionDAG &DAG) const {
5477 unsigned NumElts = BVOps.size();
5478 // Only do shuffles on 128-bit vector types for now.
5479 if (MVT::getSizeInBits(EVT) * NumElts == 64) return false;
5480 if (NumElts == 2) return true;
5481 if (NumElts == 4) {
5482 return (isMOVLMask(&BVOps[0], 4) ||
5483 isCommutedMOVL(&BVOps[0], 4, true) ||
5484 isSHUFPMask(&BVOps[0], 4) ||
5485 isCommutedSHUFP(&BVOps[0], 4));
5486 }
5487 return false;
5488}
5489
5490//===----------------------------------------------------------------------===//
5491// X86 Scheduler Hooks
5492//===----------------------------------------------------------------------===//
5493
5494MachineBasicBlock *
Evan Chenge637db12008-01-30 18:18:23 +00005495X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
5496 MachineBasicBlock *BB) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005497 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
5498 switch (MI->getOpcode()) {
5499 default: assert(false && "Unexpected instr type to insert");
5500 case X86::CMOV_FR32:
5501 case X86::CMOV_FR64:
5502 case X86::CMOV_V4F32:
5503 case X86::CMOV_V2F64:
Evan Cheng621216e2007-09-29 00:00:36 +00005504 case X86::CMOV_V2I64: {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005505 // To "insert" a SELECT_CC instruction, we actually have to insert the
5506 // diamond control-flow pattern. The incoming instruction knows the
5507 // destination vreg to set, the condition code register to branch on, the
5508 // true/false values to select between, and a branch opcode to use.
5509 const BasicBlock *LLVM_BB = BB->getBasicBlock();
5510 ilist<MachineBasicBlock>::iterator It = BB;
5511 ++It;
5512
5513 // thisMBB:
5514 // ...
5515 // TrueVal = ...
5516 // cmpTY ccX, r1, r2
5517 // bCC copy1MBB
5518 // fallthrough --> copy0MBB
5519 MachineBasicBlock *thisMBB = BB;
5520 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
5521 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
5522 unsigned Opc =
5523 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
5524 BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB);
5525 MachineFunction *F = BB->getParent();
5526 F->getBasicBlockList().insert(It, copy0MBB);
5527 F->getBasicBlockList().insert(It, sinkMBB);
5528 // Update machine-CFG edges by first adding all successors of the current
5529 // block to the new block which will contain the Phi node for the select.
5530 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
5531 e = BB->succ_end(); i != e; ++i)
5532 sinkMBB->addSuccessor(*i);
5533 // Next, remove all successors of the current block, and add the true
5534 // and fallthrough blocks as its successors.
5535 while(!BB->succ_empty())
5536 BB->removeSuccessor(BB->succ_begin());
5537 BB->addSuccessor(copy0MBB);
5538 BB->addSuccessor(sinkMBB);
5539
5540 // copy0MBB:
5541 // %FalseValue = ...
5542 // # fallthrough to sinkMBB
5543 BB = copy0MBB;
5544
5545 // Update machine-CFG edges
5546 BB->addSuccessor(sinkMBB);
5547
5548 // sinkMBB:
5549 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
5550 // ...
5551 BB = sinkMBB;
5552 BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg())
5553 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
5554 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
5555
5556 delete MI; // The pseudo instruction is gone now.
5557 return BB;
5558 }
5559
5560 case X86::FP32_TO_INT16_IN_MEM:
5561 case X86::FP32_TO_INT32_IN_MEM:
5562 case X86::FP32_TO_INT64_IN_MEM:
5563 case X86::FP64_TO_INT16_IN_MEM:
5564 case X86::FP64_TO_INT32_IN_MEM:
Dale Johannesen6d0e36a2007-08-07 01:17:37 +00005565 case X86::FP64_TO_INT64_IN_MEM:
5566 case X86::FP80_TO_INT16_IN_MEM:
5567 case X86::FP80_TO_INT32_IN_MEM:
5568 case X86::FP80_TO_INT64_IN_MEM: {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005569 // Change the floating point control register to use "round towards zero"
5570 // mode when truncating to an integer value.
5571 MachineFunction *F = BB->getParent();
5572 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
5573 addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx);
5574
 5575    // Load the old value of the control word...
5576 unsigned OldCW =
Chris Lattner1b989192007-12-31 04:13:23 +00005577 F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005578 addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx);
5579
5580 // Set the high part to be round to zero...
5581 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx)
5582 .addImm(0xC7F);
5583
5584 // Reload the modified control word now...
5585 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);
5586
5587 // Restore the memory image of control word to original value
5588 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx)
5589 .addReg(OldCW);
5590
5591 // Get the X86 opcode to use.
5592 unsigned Opc;
5593 switch (MI->getOpcode()) {
5594 default: assert(0 && "illegal opcode!");
5595 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
5596 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
5597 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
5598 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
5599 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
5600 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
Dale Johannesen6d0e36a2007-08-07 01:17:37 +00005601 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
5602 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
5603 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005604 }
5605
5606 X86AddressMode AM;
5607 MachineOperand &Op = MI->getOperand(0);
5608 if (Op.isRegister()) {
5609 AM.BaseType = X86AddressMode::RegBase;
5610 AM.Base.Reg = Op.getReg();
5611 } else {
5612 AM.BaseType = X86AddressMode::FrameIndexBase;
Chris Lattner6017d482007-12-30 23:10:15 +00005613 AM.Base.FrameIndex = Op.getIndex();
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005614 }
5615 Op = MI->getOperand(1);
5616 if (Op.isImmediate())
5617 AM.Scale = Op.getImm();
5618 Op = MI->getOperand(2);
5619 if (Op.isImmediate())
5620 AM.IndexReg = Op.getImm();
5621 Op = MI->getOperand(3);
5622 if (Op.isGlobalAddress()) {
5623 AM.GV = Op.getGlobal();
5624 } else {
5625 AM.Disp = Op.getImm();
5626 }
5627 addFullAddress(BuildMI(BB, TII->get(Opc)), AM)
5628 .addReg(MI->getOperand(4).getReg());
5629
5630 // Reload the original control word now.
5631 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);
5632
5633 delete MI; // The pseudo instruction is gone now.
5634 return BB;
5635 }
5636 }
5637}
5638
5639//===----------------------------------------------------------------------===//
5640// X86 Optimization Hooks
5641//===----------------------------------------------------------------------===//
5642
5643void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
Dan Gohmand0dfc772008-02-13 22:28:48 +00005644 const APInt &Mask,
Dan Gohman229fa052008-02-13 00:35:47 +00005645 APInt &KnownZero,
5646 APInt &KnownOne,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005647 const SelectionDAG &DAG,
5648 unsigned Depth) const {
5649 unsigned Opc = Op.getOpcode();
5650 assert((Opc >= ISD::BUILTIN_OP_END ||
5651 Opc == ISD::INTRINSIC_WO_CHAIN ||
5652 Opc == ISD::INTRINSIC_W_CHAIN ||
5653 Opc == ISD::INTRINSIC_VOID) &&
5654 "Should use MaskedValueIsZero if you don't know whether Op"
5655 " is a target node!");
5656
Dan Gohman1d79e432008-02-13 23:07:24 +00005657 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); // Don't know anything.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005658 switch (Opc) {
5659 default: break;
5660 case X86ISD::SETCC:
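    // X86ISD::SETCC produces 0 or 1 in the low bit, so every bit above bit 0
    // is known to be zero.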
Dan Gohman229fa052008-02-13 00:35:47 +00005661 KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(),
5662 Mask.getBitWidth() - 1);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005663 break;
5664 }
5665}
5666
5667/// getShuffleScalarElt - Returns the scalar element that will make up the ith
5668/// element of the result of the vector shuffle.
5669static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) {
5670 MVT::ValueType VT = N->getValueType(0);
5671 SDOperand PermMask = N->getOperand(2);
5672 unsigned NumElems = PermMask.getNumOperands();
5673 SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1);
5674 i %= NumElems;
5675 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
5676 return (i == 0)
Arnold Schwaighofere2d6bbb2007-10-11 19:40:01 +00005677 ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005678 } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
5679 SDOperand Idx = PermMask.getOperand(i);
5680 if (Idx.getOpcode() == ISD::UNDEF)
5681 return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
5682 return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG);
5683 }
5684 return SDOperand();
5685}
5686
5687/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
5688/// node is a GlobalAddress + an offset.
5689static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) {
5690 unsigned Opc = N->getOpcode();
5691 if (Opc == X86ISD::Wrapper) {
5692 if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) {
5693 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
5694 return true;
5695 }
5696 } else if (Opc == ISD::ADD) {
5697 SDOperand N1 = N->getOperand(0);
5698 SDOperand N2 = N->getOperand(1);
5699 if (isGAPlusOffset(N1.Val, GA, Offset)) {
5700 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
5701 if (V) {
5702 Offset += V->getSignExtended();
5703 return true;
5704 }
5705 } else if (isGAPlusOffset(N2.Val, GA, Offset)) {
5706 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
5707 if (V) {
5708 Offset += V->getSignExtended();
5709 return true;
5710 }
5711 }
5712 }
5713 return false;
5714}
5715
5716/// isConsecutiveLoad - Returns true if N is loading from an address of Base
5717/// + Dist * Size.
5718static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
5719 MachineFrameInfo *MFI) {
5720 if (N->getOperand(0).Val != Base->getOperand(0).Val)
5721 return false;
5722
5723 SDOperand Loc = N->getOperand(1);
5724 SDOperand BaseLoc = Base->getOperand(1);
5725 if (Loc.getOpcode() == ISD::FrameIndex) {
5726 if (BaseLoc.getOpcode() != ISD::FrameIndex)
5727 return false;
Dan Gohman53491e92007-07-23 20:24:29 +00005728 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
5729 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005730 int FS = MFI->getObjectSize(FI);
5731 int BFS = MFI->getObjectSize(BFI);
5732 if (FS != BFS || FS != Size) return false;
5733 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
5734 } else {
5735 GlobalValue *GV1 = NULL;
5736 GlobalValue *GV2 = NULL;
5737 int64_t Offset1 = 0;
5738 int64_t Offset2 = 0;
5739 bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
5740 bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
5741 if (isGA1 && isGA2 && GV1 == GV2)
5742 return Offset1 == (Offset2 + Dist*Size);
5743 }
5744
5745 return false;
5746}
5747
5748static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI,
5749 const X86Subtarget *Subtarget) {
5750 GlobalValue *GV;
Nick Lewycky4bd3fca2008-02-02 08:29:58 +00005751 int64_t Offset = 0;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005752 if (isGAPlusOffset(Base, GV, Offset))
5753 return (GV->getAlignment() >= 16 && (Offset % 16) == 0);
Chris Lattner3834cf32008-01-26 20:07:42 +00005754 // DAG combine handles the stack object case.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005755 return false;
5756}
5757
5758
5759/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
5760/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
5761/// if the load addresses are consecutive, non-overlapping, and in the right
5762/// order.
5763static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
5764 const X86Subtarget *Subtarget) {
5765 MachineFunction &MF = DAG.getMachineFunction();
5766 MachineFrameInfo *MFI = MF.getFrameInfo();
5767 MVT::ValueType VT = N->getValueType(0);
5768 MVT::ValueType EVT = MVT::getVectorElementType(VT);
5769 SDOperand PermMask = N->getOperand(2);
5770 int NumElems = (int)PermMask.getNumOperands();
5771 SDNode *Base = NULL;
5772 for (int i = 0; i < NumElems; ++i) {
5773 SDOperand Idx = PermMask.getOperand(i);
5774 if (Idx.getOpcode() == ISD::UNDEF) {
5775 if (!Base) return SDOperand();
5776 } else {
5777 SDOperand Arg =
5778 getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG);
5779 if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val))
5780 return SDOperand();
5781 if (!Base)
5782 Base = Arg.Val;
5783 else if (!isConsecutiveLoad(Arg.Val, Base,
5784 i, MVT::getSizeInBits(EVT)/8,MFI))
5785 return SDOperand();
5786 }
5787 }
5788
5789 bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget);
Dan Gohman11821702007-07-27 17:16:43 +00005790 LoadSDNode *LD = cast<LoadSDNode>(Base);
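  // With a 16-byte aligned base the combined 128-bit load can use the default
  // (natural) alignment; otherwise carry over the original load's alignment.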
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005791 if (isAlign16) {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005792 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
Dan Gohman11821702007-07-27 17:16:43 +00005793 LD->getSrcValueOffset(), LD->isVolatile());
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005794 } else {
Dan Gohman11821702007-07-27 17:16:43 +00005795 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
5796 LD->getSrcValueOffset(), LD->isVolatile(),
5797 LD->getAlignment());
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005798 }
5799}
5800
5801/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
5802static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
5803 const X86Subtarget *Subtarget) {
5804 SDOperand Cond = N->getOperand(0);
5805
5806 // If we have SSE[12] support, try to form min/max nodes.
5807 if (Subtarget->hasSSE2() &&
5808 (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
5809 if (Cond.getOpcode() == ISD::SETCC) {
5810 // Get the LHS/RHS of the select.
5811 SDOperand LHS = N->getOperand(1);
5812 SDOperand RHS = N->getOperand(2);
5813 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
5814
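      // Note that SSE min/max do not follow IEEE semantics for NaNs and
      // signed zeros, which is why several of the predicates below are only
      // combined when UnsafeFPMath is set.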
5815 unsigned Opcode = 0;
5816 if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
5817 switch (CC) {
5818 default: break;
5819 case ISD::SETOLE: // (X <= Y) ? X : Y -> min
5820 case ISD::SETULE:
5821 case ISD::SETLE:
5822 if (!UnsafeFPMath) break;
5823 // FALL THROUGH.
5824 case ISD::SETOLT: // (X olt/lt Y) ? X : Y -> min
5825 case ISD::SETLT:
5826 Opcode = X86ISD::FMIN;
5827 break;
5828
5829 case ISD::SETOGT: // (X > Y) ? X : Y -> max
5830 case ISD::SETUGT:
5831 case ISD::SETGT:
5832 if (!UnsafeFPMath) break;
5833 // FALL THROUGH.
5834 case ISD::SETUGE: // (X uge/ge Y) ? X : Y -> max
5835 case ISD::SETGE:
5836 Opcode = X86ISD::FMAX;
5837 break;
5838 }
5839 } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
5840 switch (CC) {
5841 default: break;
5842 case ISD::SETOGT: // (X > Y) ? Y : X -> min
5843 case ISD::SETUGT:
5844 case ISD::SETGT:
5845 if (!UnsafeFPMath) break;
5846 // FALL THROUGH.
5847 case ISD::SETUGE: // (X uge/ge Y) ? Y : X -> min
5848 case ISD::SETGE:
5849 Opcode = X86ISD::FMIN;
5850 break;
5851
5852 case ISD::SETOLE: // (X <= Y) ? Y : X -> max
5853 case ISD::SETULE:
5854 case ISD::SETLE:
5855 if (!UnsafeFPMath) break;
5856 // FALL THROUGH.
5857 case ISD::SETOLT: // (X olt/lt Y) ? Y : X -> max
5858 case ISD::SETLT:
5859 Opcode = X86ISD::FMAX;
5860 break;
5861 }
5862 }
5863
5864 if (Opcode)
5865 return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS);
5866 }
5867
5868 }
5869
5870 return SDOperand();
5871}
5872
Chris Lattner470d5dc2008-01-25 06:14:17 +00005873/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
5874/// X86ISD::FXOR nodes.
Chris Lattnerf82998f2008-01-25 05:46:26 +00005875static SDOperand PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
Chris Lattner470d5dc2008-01-25 06:14:17 +00005876 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
5877 // F[X]OR(0.0, x) -> x
5878 // F[X]OR(x, 0.0) -> x
Chris Lattnerf82998f2008-01-25 05:46:26 +00005879 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
5880 if (C->getValueAPF().isPosZero())
5881 return N->getOperand(1);
5882 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
5883 if (C->getValueAPF().isPosZero())
5884 return N->getOperand(0);
5885 return SDOperand();
5886}
5887
5888/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
5889static SDOperand PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
5890 // FAND(0.0, x) -> 0.0
5891 // FAND(x, 0.0) -> 0.0
5892 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
5893 if (C->getValueAPF().isPosZero())
5894 return N->getOperand(0);
5895 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
5896 if (C->getValueAPF().isPosZero())
5897 return N->getOperand(1);
5898 return SDOperand();
5899}
5900
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005901
5902SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
5903 DAGCombinerInfo &DCI) const {
5904 SelectionDAG &DAG = DCI.DAG;
5905 switch (N->getOpcode()) {
5906 default: break;
Chris Lattnerf82998f2008-01-25 05:46:26 +00005907 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, Subtarget);
5908 case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget);
Chris Lattner470d5dc2008-01-25 06:14:17 +00005909 case X86ISD::FXOR:
Chris Lattnerf82998f2008-01-25 05:46:26 +00005910 case X86ISD::FOR: return PerformFORCombine(N, DAG);
5911 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005912 }
5913
5914 return SDOperand();
5915}
5916
5917//===----------------------------------------------------------------------===//
5918// X86 Inline Assembly Support
5919//===----------------------------------------------------------------------===//
5920
5921/// getConstraintType - Given a constraint letter, return the type of
5922/// constraint it is for this target.
5923X86TargetLowering::ConstraintType
5924X86TargetLowering::getConstraintType(const std::string &Constraint) const {
5925 if (Constraint.size() == 1) {
5926 switch (Constraint[0]) {
5927 case 'A':
5928 case 'r':
5929 case 'R':
5930 case 'l':
5931 case 'q':
5932 case 'Q':
5933 case 'x':
5934 case 'Y':
5935 return C_RegisterClass;
5936 default:
5937 break;
5938 }
5939 }
5940 return TargetLowering::getConstraintType(Constraint);
5941}
5942
Dale Johannesene99fc902008-01-29 02:21:21 +00005943/// lowerXConstraint - Try to replace an X constraint, which matches anything,
5944/// with another that has more specific requirements based on the type of the
5945/// corresponding operand.
5946void X86TargetLowering::lowerXConstraint(MVT::ValueType ConstraintVT,
5947 std::string& s) const {
5948 if (MVT::isFloatingPoint(ConstraintVT)) {
5949 if (Subtarget->hasSSE2())
5950 s = "Y";
5951 else if (Subtarget->hasSSE1())
5952 s = "x";
5953 else
5954 s = "f";
5955 } else
5956 return TargetLowering::lowerXConstraint(ConstraintVT, s);
5957}
5958
Chris Lattnera531abc2007-08-25 00:47:38 +00005959/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
5960/// vector. If it is invalid, don't add anything to Ops.
5961void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
5962 char Constraint,
5963 std::vector<SDOperand>&Ops,
5964 SelectionDAG &DAG) {
5965 SDOperand Result(0, 0);
5966
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005967 switch (Constraint) {
5968 default: break;
5969 case 'I':
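    // 'I' accepts an integer constant in the range 0..31 (e.g. a 32-bit
    // shift count).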
5970 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
Chris Lattnera531abc2007-08-25 00:47:38 +00005971 if (C->getValue() <= 31) {
5972 Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
5973 break;
5974 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005975 }
Chris Lattnera531abc2007-08-25 00:47:38 +00005976 return;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005977 case 'N':
5978 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
Chris Lattnera531abc2007-08-25 00:47:38 +00005979 if (C->getValue() <= 255) {
5980 Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
5981 break;
5982 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005983 }
Chris Lattnera531abc2007-08-25 00:47:38 +00005984 return;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005985 case 'i': {
5986 // Literal immediates are always ok.
Chris Lattnera531abc2007-08-25 00:47:38 +00005987 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
5988 Result = DAG.getTargetConstant(CST->getValue(), Op.getValueType());
5989 break;
5990 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00005991
5992 // If we are in non-pic codegen mode, we allow the address of a global (with
5993 // an optional displacement) to be used with 'i'.
5994 GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
5995 int64_t Offset = 0;
5996
5997 // Match either (GA) or (GA+C)
5998 if (GA) {
5999 Offset = GA->getOffset();
6000 } else if (Op.getOpcode() == ISD::ADD) {
6001 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
6002 GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
6003 if (C && GA) {
6004 Offset = GA->getOffset()+C->getValue();
6005 } else {
6006 C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
6007 GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
6008 if (C && GA)
6009 Offset = GA->getOffset()+C->getValue();
6010 else
6011 C = 0, GA = 0;
6012 }
6013 }
6014
6015 if (GA) {
6016 // If addressing this global requires a load (e.g. in PIC mode), we can't
6017 // match.
6018 if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
6019 false))
Chris Lattnera531abc2007-08-25 00:47:38 +00006020 return;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006021
6022 Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
6023 Offset);
Chris Lattnera531abc2007-08-25 00:47:38 +00006024 Result = Op;
6025 break;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006026 }
6027
6028 // Otherwise, not valid for this mode.
Chris Lattnera531abc2007-08-25 00:47:38 +00006029 return;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006030 }
6031 }
Chris Lattnera531abc2007-08-25 00:47:38 +00006032
6033 if (Result.Val) {
6034 Ops.push_back(Result);
6035 return;
6036 }
6037 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006038}
6039
6040std::vector<unsigned> X86TargetLowering::
6041getRegClassForInlineAsmConstraint(const std::string &Constraint,
6042 MVT::ValueType VT) const {
6043 if (Constraint.size() == 1) {
6044 // FIXME: not handling fp-stack yet!
6045 switch (Constraint[0]) { // GCC X86 Constraint Letters
6046 default: break; // Unknown constraint letter
6047 case 'A': // EAX/EDX
6048 if (VT == MVT::i32 || VT == MVT::i64)
6049 return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
6050 break;
6051 case 'q': // Q_REGS (GENERAL_REGS in 64-bit mode)
6052 case 'Q': // Q_REGS
6053 if (VT == MVT::i32)
6054 return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
6055 else if (VT == MVT::i16)
6056 return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
6057 else if (VT == MVT::i8)
Evan Chengf85c10f2007-08-13 23:27:11 +00006058 return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
Chris Lattner35032592007-11-04 06:51:12 +00006059 else if (VT == MVT::i64)
6060 return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0);
6061 break;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006062 }
6063 }
6064
6065 return std::vector<unsigned>();
6066}
6067
6068std::pair<unsigned, const TargetRegisterClass*>
6069X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
6070 MVT::ValueType VT) const {
6071 // First, see if this is a constraint that directly corresponds to an LLVM
6072 // register class.
6073 if (Constraint.size() == 1) {
6074 // GCC Constraint Letters
6075 switch (Constraint[0]) {
6076 default: break;
6077 case 'r': // GENERAL_REGS
6078 case 'R': // LEGACY_REGS
6079 case 'l': // INDEX_REGS
6080 if (VT == MVT::i64 && Subtarget->is64Bit())
6081 return std::make_pair(0U, X86::GR64RegisterClass);
6082 if (VT == MVT::i32)
6083 return std::make_pair(0U, X86::GR32RegisterClass);
6084 else if (VT == MVT::i16)
6085 return std::make_pair(0U, X86::GR16RegisterClass);
6086 else if (VT == MVT::i8)
6087 return std::make_pair(0U, X86::GR8RegisterClass);
6088 break;
6089 case 'y': // MMX_REGS if MMX allowed.
6090 if (!Subtarget->hasMMX()) break;
6091 return std::make_pair(0U, X86::VR64RegisterClass);
6092 break;
6093 case 'Y': // SSE_REGS if SSE2 allowed
6094 if (!Subtarget->hasSSE2()) break;
6095 // FALL THROUGH.
6096 case 'x': // SSE_REGS if SSE1 allowed
6097 if (!Subtarget->hasSSE1()) break;
6098
6099 switch (VT) {
6100 default: break;
6101 // Scalar SSE types.
6102 case MVT::f32:
6103 case MVT::i32:
6104 return std::make_pair(0U, X86::FR32RegisterClass);
6105 case MVT::f64:
6106 case MVT::i64:
6107 return std::make_pair(0U, X86::FR64RegisterClass);
6108 // Vector types.
6109 case MVT::v16i8:
6110 case MVT::v8i16:
6111 case MVT::v4i32:
6112 case MVT::v2i64:
6113 case MVT::v4f32:
6114 case MVT::v2f64:
6115 return std::make_pair(0U, X86::VR128RegisterClass);
6116 }
6117 break;
6118 }
6119 }
6120
6121 // Use the default implementation in TargetLowering to convert the register
6122 // constraint into a member of a register class.
6123 std::pair<unsigned, const TargetRegisterClass*> Res;
6124 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
6125
6126 // Not found as a standard register?
6127 if (Res.second == 0) {
6128 // GCC calls "st(0)" just plain "st".
6129 if (StringsEqualNoCase("{st}", Constraint)) {
6130 Res.first = X86::ST0;
Chris Lattner3cfe51b2007-09-24 05:27:37 +00006131 Res.second = X86::RFP80RegisterClass;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00006132 }
6133
6134 return Res;
6135 }
6136
6137 // Otherwise, check to see if this is a register class of the wrong value
6138 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
6139 // turn into {ax},{dx}.
6140 if (Res.second->hasType(VT))
6141 return Res; // Correct type already, nothing to do.
6142
6143 // All of the single-register GCC register classes map their values onto
6144 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
6145 // really want an 8-bit or 32-bit register, map to the appropriate register
6146 // class and return the appropriate register.
6147 if (Res.second != X86::GR16RegisterClass)
6148 return Res;
6149
6150 if (VT == MVT::i8) {
6151 unsigned DestReg = 0;
6152 switch (Res.first) {
6153 default: break;
6154 case X86::AX: DestReg = X86::AL; break;
6155 case X86::DX: DestReg = X86::DL; break;
6156 case X86::CX: DestReg = X86::CL; break;
6157 case X86::BX: DestReg = X86::BL; break;
6158 }
6159 if (DestReg) {
6160 Res.first = DestReg;
 6161      Res.second = X86::GR8RegisterClass;
6162 }
6163 } else if (VT == MVT::i32) {
6164 unsigned DestReg = 0;
6165 switch (Res.first) {
6166 default: break;
6167 case X86::AX: DestReg = X86::EAX; break;
6168 case X86::DX: DestReg = X86::EDX; break;
6169 case X86::CX: DestReg = X86::ECX; break;
6170 case X86::BX: DestReg = X86::EBX; break;
6171 case X86::SI: DestReg = X86::ESI; break;
6172 case X86::DI: DestReg = X86::EDI; break;
6173 case X86::BP: DestReg = X86::EBP; break;
6174 case X86::SP: DestReg = X86::ESP; break;
6175 }
6176 if (DestReg) {
6177 Res.first = DestReg;
 6178      Res.second = X86::GR32RegisterClass;
6179 }
6180 } else if (VT == MVT::i64) {
6181 unsigned DestReg = 0;
6182 switch (Res.first) {
6183 default: break;
6184 case X86::AX: DestReg = X86::RAX; break;
6185 case X86::DX: DestReg = X86::RDX; break;
6186 case X86::CX: DestReg = X86::RCX; break;
6187 case X86::BX: DestReg = X86::RBX; break;
6188 case X86::SI: DestReg = X86::RSI; break;
6189 case X86::DI: DestReg = X86::RDI; break;
6190 case X86::BP: DestReg = X86::RBP; break;
6191 case X86::SP: DestReg = X86::RSP; break;
6192 }
6193 if (DestReg) {
6194 Res.first = DestReg;
 6195      Res.second = X86::GR64RegisterClass;
6196 }
6197 }
6198
6199 return Res;
6200}