//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<bool>
DisableMMX("disable-mmx", cl::Hidden, cl::desc("Disable use of MMX"));
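// (A hidden cl::opt flag like this is normally toggled on the tool command
//  line, e.g. "llc -disable-mmx", to steer the MMX-related lowering below;
//  the exact invocation depends on which tool links in this target.)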

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, MVT VT, SDValue V1,
                       SDValue V2);

X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  RegInfo = TM.getRegisterInfo();
  TD = getTargetData();

  // Set up the TargetLowering object.

  // X86 is weird; it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setBooleanContents(ZeroOrOneBooleanContent);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask); // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
  setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
  setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
    setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Expand);
  } else if (!UseSoftFloat) {
    if (X86ScalarSSEf64) {
      // We have an impenetrably clever algorithm for ui64->double only.
      setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
    }
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
  setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);

  if (!UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
      // f32 and f64 cases are Legal, f80 case is not.
      setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
      setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
    setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Promote);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
  setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
  setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
    setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
  setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
  setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
    setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
  } else if (!UseSoftFloat) {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand);
    setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand);
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
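  // (For example, a function that computes both x/y and x%y legalizes each to
  //  an ISD::SDIVREM node; CSE merges the two identical nodes, so a single
  //  idiv ends up supplying both the quotient and the remainder.)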
  setOperationAction(ISD::MULHS , MVT::i8 , Expand);
  setOperationAction(ISD::MULHU , MVT::i8 , Expand);
  setOperationAction(ISD::SDIV , MVT::i8 , Expand);
  setOperationAction(ISD::UDIV , MVT::i8 , Expand);
  setOperationAction(ISD::SREM , MVT::i8 , Expand);
  setOperationAction(ISD::UREM , MVT::i8 , Expand);
  setOperationAction(ISD::MULHS , MVT::i16 , Expand);
  setOperationAction(ISD::MULHU , MVT::i16 , Expand);
  setOperationAction(ISD::SDIV , MVT::i16 , Expand);
  setOperationAction(ISD::UDIV , MVT::i16 , Expand);
  setOperationAction(ISD::SREM , MVT::i16 , Expand);
  setOperationAction(ISD::UREM , MVT::i16 , Expand);
  setOperationAction(ISD::MULHS , MVT::i32 , Expand);
  setOperationAction(ISD::MULHU , MVT::i32 , Expand);
  setOperationAction(ISD::SDIV , MVT::i32 , Expand);
  setOperationAction(ISD::UDIV , MVT::i32 , Expand);
  setOperationAction(ISD::SREM , MVT::i32 , Expand);
  setOperationAction(ISD::UREM , MVT::i32 , Expand);
  setOperationAction(ISD::MULHS , MVT::i64 , Expand);
  setOperationAction(ISD::MULHU , MVT::i64 , Expand);
  setOperationAction(ISD::SDIV , MVT::i64 , Expand);
  setOperationAction(ISD::UDIV , MVT::i64 , Expand);
  setOperationAction(ISD::SREM , MVT::i64 , Expand);
  setOperationAction(ISD::UREM , MVT::i64 , Expand);

  setOperationAction(ISD::BR_JT , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND , MVT::Other, Custom);
  setOperationAction(ISD::BR_CC , MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
  setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
  setOperationAction(ISD::FREM , MVT::f32 , Expand);
  setOperationAction(ISD::FREM , MVT::f64 , Expand);
  setOperationAction(ISD::FREM , MVT::f80 , Expand);
  setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);

  setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
  setOperationAction(ISD::CTTZ , MVT::i8 , Custom);
  setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
  setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
  setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
  setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
  setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
  setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
  setOperationAction(ISD::CTLZ , MVT::i32 , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
    setOperationAction(ISD::CTTZ , MVT::i64 , Custom);
    setOperationAction(ISD::CTLZ , MVT::i64 , Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
  setOperationAction(ISD::BSWAP , MVT::i16 , Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT , MVT::i1 , Promote);
  setOperationAction(ISD::SELECT , MVT::i8 , Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT , MVT::i16 , Custom);
  setOperationAction(ISD::SELECT , MVT::i32 , Custom);
  setOperationAction(ISD::SELECT , MVT::f32 , Custom);
  setOperationAction(ISD::SELECT , MVT::f64 , Custom);
  setOperationAction(ISD::SELECT , MVT::f80 , Custom);
  setOperationAction(ISD::SETCC , MVT::i8 , Custom);
  setOperationAction(ISD::SETCC , MVT::i16 , Custom);
  setOperationAction(ISD::SETCC , MVT::i32 , Custom);
  setOperationAction(ISD::SETCC , MVT::f32 , Custom);
  setOperationAction(ISD::SETCC , MVT::f64 , Custom);
  setOperationAction(ISD::SETCC , MVT::f80 , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT , MVT::i64 , Custom);
    setOperationAction(ISD::SETCC , MVT::i64 , Custom);
  }
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET , MVT::Other, Custom);
  setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
  setOperationAction(ISD::JumpTable , MVT::i32 , Custom);
  setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool , MVT::i64 , Custom);
    setOperationAction(ISD::JumpTable , MVT::i64 , Custom);
    setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
  setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
  setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS , MVT::i64 , Custom);
    setOperationAction(ISD::SRA_PARTS , MVT::i64 , Custom);
    setOperationAction(ISD::SRL_PARTS , MVT::i64 , Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH , MVT::Other, Legal);

  if (!Subtarget->hasSSE2())
    setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand);

  // Expand certain atomics.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i8, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i16, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);

  if (!Subtarget->is64Bit()) {
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
  }

  // Use the default ISD::DBG_STOPPOINT, ISD::DECLARE expansion.
  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing()) {
    setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART , MVT::Other, Custom);
  setOperationAction(ISD::VAEND , MVT::Other, Expand);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::VAARG , MVT::Other, Custom);
    setOperationAction(ISD::VACOPY , MVT::Other, Custom);
  } else {
    setOperationAction(ISD::VAARG , MVT::Other, Expand);
    setOperationAction(ISD::VACOPY , MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  if (!UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f64, Custom);
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f64, Custom);
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN , MVT::f64, Expand);
    setOperationAction(ISD::FCOS , MVT::f64, Expand);
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0)); // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f64 , Expand);
      setOperationAction(ISD::FCOS , MVT::f64 , Expand);
    }
  } else if (!UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f64 , Expand);
      setOperationAction(ISD::FCOS , MVT::f64 , Expand);
    }
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // Long double always uses X87.
  if (!UseSoftFloat) {
    addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      bool ignored;
      APFloat TmpFlt(+0.0);
      TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                     &ignored);
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f80 , Expand);
      setOperationAction(ISD::FCOS , MVT::f80 , Expand);
    }
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW , MVT::f32 , Expand);
  setOperationAction(ISD::FPOW , MVT::f64 , Expand);
  setOperationAction(ISD::FPOW , MVT::f80 , Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD , (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SUB , (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::MUL , (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT,(MVT::SimpleValueType)VT,Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR,(MVT::SimpleValueType)VT,Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT,(MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ROTL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ROTR, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::VSETCC, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG2, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG10, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FEXP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FEXP2, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, (MVT::SimpleValueType)VT, Expand);
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!UseSoftFloat && !DisableMMX && Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8, X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2f32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);

    setOperationAction(ISD::ADD, MVT::v8i8, Legal);
    setOperationAction(ISD::ADD, MVT::v4i16, Legal);
    setOperationAction(ISD::ADD, MVT::v2i32, Legal);
    setOperationAction(ISD::ADD, MVT::v1i64, Legal);

    setOperationAction(ISD::SUB, MVT::v8i8, Legal);
    setOperationAction(ISD::SUB, MVT::v4i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i32, Legal);
    setOperationAction(ISD::SUB, MVT::v1i64, Legal);

    setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
    setOperationAction(ISD::MUL, MVT::v4i16, Legal);

    setOperationAction(ISD::AND, MVT::v8i8, Promote);
    AddPromotedToType (ISD::AND, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v4i16, Promote);
    AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v2i32, Promote);
    AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v1i64, Legal);

    setOperationAction(ISD::OR, MVT::v8i8, Promote);
    AddPromotedToType (ISD::OR, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::OR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::OR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v1i64, Legal);

    setOperationAction(ISD::XOR, MVT::v8i8, Promote);
    AddPromotedToType (ISD::XOR, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v1i64, Legal);

    setOperationAction(ISD::LOAD, MVT::v8i8, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v2f32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v1i64, Legal);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);

    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);

    setTruncStoreAction(MVT::v8i16, MVT::v8i8, Expand);
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Expand);
    setOperationAction(ISD::SELECT, MVT::v8i8, Promote);
    setOperationAction(ISD::SELECT, MVT::v4i16, Promote);
    setOperationAction(ISD::SELECT, MVT::v2i32, Promote);
    setOperationAction(ISD::SELECT, MVT::v1i64, Custom);
  }

  if (!UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::VSETCC, MVT::v4f32, Custom);
  }

  if (!UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);

    // FIXME: Unfortunately -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);

    setOperationAction(ISD::VSETCC, MVT::v2f64, Custom);
    setOperationAction(ISD::VSETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::VSETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::VSETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors.
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND, (MVT::SimpleValueType)VT, Promote);
      AddPromotedToType (ISD::AND, (MVT::SimpleValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR, (MVT::SimpleValueType)VT, Promote);
      AddPromotedToType (ISD::OR, (MVT::SimpleValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR, (MVT::SimpleValueType)VT, Promote);
      AddPromotedToType (ISD::XOR, (MVT::SimpleValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD, (MVT::SimpleValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::SimpleValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::SimpleValueType)VT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    if (!DisableMMX && Subtarget->hasMMX()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
    }
  }

  if (Subtarget->hasSSE41()) {
    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width. f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
    }
  }

  if (Subtarget->hasSSE42()) {
    setOperationAction(ISD::VSETCC, MVT::v2i64, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Add/Sub/Mul with overflow operations are custom lowered.
  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::SADDO, MVT::i64, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i64, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i64, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i64, Custom);
  setOperationAction(ISD::SMULO, MVT::i32, Custom);
  setOperationAction(ISD::SMULO, MVT::i64, Custom);

  if (!Subtarget->is64Bit()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, 0);
    setLibcallName(RTLIB::SRL_I128, 0);
    setLibcallName(RTLIB::SRA_I128, 0);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::STORE);
  if (Subtarget->is64Bit())
    setTargetDAGCombine(ISD::MUL);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are optimizing for size.
  maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16; // For @llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 3; // For @llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
  setPrefLoopAlignment(16);
  benefitFromCodePlacementOpt = true;
}
822
Scott Michel5b8f82e2008-03-10 15:42:14 +0000823
Duncan Sands5480c042009-01-01 15:52:00 +0000824MVT X86TargetLowering::getSetCCResultType(MVT VT) const {
Scott Michel5b8f82e2008-03-10 15:42:14 +0000825 return MVT::i8;
826}
827
828
Evan Cheng29286502008-01-23 23:17:41 +0000829/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
830/// the desired ByVal argument alignment.
831static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
832 if (MaxAlign == 16)
833 return;
834 if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
835 if (VTy->getBitWidth() == 128)
836 MaxAlign = 16;
Evan Cheng29286502008-01-23 23:17:41 +0000837 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
838 unsigned EltAlign = 0;
839 getMaxByValAlign(ATy->getElementType(), EltAlign);
840 if (EltAlign > MaxAlign)
841 MaxAlign = EltAlign;
842 } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
843 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
844 unsigned EltAlign = 0;
845 getMaxByValAlign(STy->getElementType(i), EltAlign);
846 if (EltAlign > MaxAlign)
847 MaxAlign = EltAlign;
848 if (MaxAlign == 16)
849 break;
850 }
851 }
852 return;
853}
854
855/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
856/// function arguments in the caller parameter area. For X86, aggregates
Dale Johannesen0c191872008-02-08 19:48:20 +0000857/// that contain SSE vectors are placed at 16-byte boundaries while the rest
858/// are at 4-byte boundaries.
Evan Cheng29286502008-01-23 23:17:41 +0000859unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
Evan Cheng1887c1c2008-08-21 21:00:15 +0000860 if (Subtarget->is64Bit()) {
861 // Max of 8 and alignment of type.
Anton Korobeynikovbff66b02008-09-09 18:22:57 +0000862 unsigned TyAlign = TD->getABITypeAlignment(Ty);
Evan Cheng1887c1c2008-08-21 21:00:15 +0000863 if (TyAlign > 8)
864 return TyAlign;
865 return 8;
866 }
867
Evan Cheng29286502008-01-23 23:17:41 +0000868 unsigned Align = 4;
Dale Johannesen0c191872008-02-08 19:48:20 +0000869 if (Subtarget->hasSSE1())
870 getMaxByValAlign(Ty, Align);
Evan Cheng29286502008-01-23 23:17:41 +0000871 return Align;
872}
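
// Illustrative sketch only (hypothetical type, not used by the lowering):
// for an aggregate such as
//
//   struct S { __m128 V; int I; };   // assumed example type
//
// getMaxByValAlign reports 16 because the struct contains a 128-bit vector,
// so getByValTypeAlignment returns 16 on x86-32 when SSE1 is available.
// Without SSE the 32-bit answer stays at 4; on x86-64 the answer is the ABI
// alignment of the type, but never less than 8.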
Chris Lattner2b02a442007-02-25 08:29:00 +0000873
Evan Chengf0df0312008-05-15 08:39:06 +0000874/// getOptimalMemOpType - Returns the target-specific optimal type for load
Evan Cheng0ef8de32008-05-15 22:13:02 +0000875/// and store operations as a result of memset, memcpy, and memmove
876/// lowering. It returns MVT::iAny if SelectionDAG should be responsible for
Evan Chengf0df0312008-05-15 08:39:06 +0000877/// determining it.
Duncan Sands83ec4b62008-06-06 12:08:01 +0000878MVT
Evan Chengf0df0312008-05-15 08:39:06 +0000879X86TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned Align,
Devang Patel578efa92009-06-05 21:57:13 +0000880 bool isSrcConst, bool isSrcStr,
881 SelectionDAG &DAG) const {
Chris Lattner4002a1b2008-10-28 05:49:35 +0000882 // FIXME: This turns off use of xmm stores for memset/memcpy on targets like
883 // linux. This is because the stack realignment code can't handle certain
884 // cases like PR2962. This should be removed when PR2962 is fixed.
Devang Patel578efa92009-06-05 21:57:13 +0000885 const Function *F = DAG.getMachineFunction().getFunction();
886 bool NoImplicitFloatOps = F->hasFnAttr(Attribute::NoImplicitFloat);
887 if (!NoImplicitFloatOps && Subtarget->getStackAlignment() >= 16) {
Chris Lattner4002a1b2008-10-28 05:49:35 +0000888 if ((isSrcConst || isSrcStr) && Subtarget->hasSSE2() && Size >= 16)
889 return MVT::v4i32;
890 if ((isSrcConst || isSrcStr) && Subtarget->hasSSE1() && Size >= 16)
891 return MVT::v4f32;
892 }
Evan Chengf0df0312008-05-15 08:39:06 +0000893 if (Subtarget->is64Bit() && Size >= 8)
894 return MVT::i64;
895 return MVT::i32;
896}
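
// Illustrative sketch (hypothetical call sites, not actual callers): a
// 64-byte memcpy from a constant string on a target with SSE2 and a stack
// alignment of at least 16 is widened to v4i32 stores, e.g.
//
//   getOptimalMemOpType(/*Size=*/64, /*Align=*/16, /*isSrcConst=*/true,
//                       /*isSrcStr=*/true, DAG);   // -> MVT::v4i32
//
// whereas a memset of the same size (no constant source) falls back to
// MVT::i64 on x86-64 and MVT::i32 on x86-32.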
897
Evan Chengcc415862007-11-09 01:32:10 +0000898/// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
899/// jumptable.
Dan Gohman475871a2008-07-27 21:46:04 +0000900SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
Evan Chengcc415862007-11-09 01:32:10 +0000901 SelectionDAG &DAG) const {
902 if (usesGlobalOffsetTable())
Dale Johannesenb300d2a2009-02-07 00:55:49 +0000903 return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy());
Evan Chengcc415862007-11-09 01:32:10 +0000904 if (!Subtarget->isPICStyleRIPRel())
Dale Johannesenb300d2a2009-02-07 00:55:49 +0000905 // This doesn't have DebugLoc associated with it, but is not really the
906 // same as a Register.
907 return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc::getUnknownLoc(),
908 getPointerTy());
Evan Chengcc415862007-11-09 01:32:10 +0000909 return Table;
910}
911
Chris Lattner2b02a442007-02-25 08:29:00 +0000912//===----------------------------------------------------------------------===//
913// Return Value Calling Convention Implementation
914//===----------------------------------------------------------------------===//
915
Chris Lattner59ed56b2007-02-28 04:55:35 +0000916#include "X86GenCallingConv.inc"
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +0000917
Chris Lattner2a9bdd72007-02-25 09:12:39 +0000918/// LowerRET - Lower an ISD::RET node.
Dan Gohman475871a2008-07-27 21:46:04 +0000919SDValue X86TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
Dale Johannesen6f38cb62009-02-07 19:59:05 +0000920 DebugLoc dl = Op.getDebugLoc();
Chris Lattner2a9bdd72007-02-25 09:12:39 +0000921 assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");
Scott Michelfdc40a02009-02-17 22:15:04 +0000922
Chris Lattner9774c912007-02-27 05:28:59 +0000923 SmallVector<CCValAssign, 16> RVLocs;
924 unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
Chris Lattner52387be2007-06-19 00:13:10 +0000925 bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
926 CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
Gabor Greifba36cb52008-08-28 21:40:38 +0000927 CCInfo.AnalyzeReturn(Op.getNode(), RetCC_X86);
Scott Michelfdc40a02009-02-17 22:15:04 +0000928
Chris Lattner2a9bdd72007-02-25 09:12:39 +0000929 // If this is the first return lowered for this function, add the regs to the
930 // liveout set for the function.
Chris Lattner84bc5422007-12-31 04:13:23 +0000931 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
Chris Lattner9774c912007-02-27 05:28:59 +0000932 for (unsigned i = 0; i != RVLocs.size(); ++i)
933 if (RVLocs[i].isRegLoc())
Chris Lattner84bc5422007-12-31 04:13:23 +0000934 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
Chris Lattner2a9bdd72007-02-25 09:12:39 +0000935 }
Dan Gohman475871a2008-07-27 21:46:04 +0000936 SDValue Chain = Op.getOperand(0);
Scott Michelfdc40a02009-02-17 22:15:04 +0000937
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +0000938 // Handle tail call return.
Arnold Schwaighofer30e62c02008-04-30 09:16:33 +0000939 Chain = GetPossiblePreceedingTailCall(Chain, X86ISD::TAILCALL);
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +0000940 if (Chain.getOpcode() == X86ISD::TAILCALL) {
Dan Gohman475871a2008-07-27 21:46:04 +0000941 SDValue TailCall = Chain;
942 SDValue TargetAddress = TailCall.getOperand(1);
943 SDValue StackAdjustment = TailCall.getOperand(2);
Chris Lattnerb4a6eaa2008-01-16 05:52:18 +0000944 assert(((TargetAddress.getOpcode() == ISD::Register &&
Arnold Schwaighofer290ae032008-09-22 14:50:07 +0000945 (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::EAX ||
Arnold Schwaighoferbbd8c332009-06-12 16:26:57 +0000946 cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R11)) ||
Bill Wendling056292f2008-09-16 21:48:12 +0000947 TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
Scott Michelfdc40a02009-02-17 22:15:04 +0000948 TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +0000949          "Expecting a global address, external symbol, or register");
Chris Lattnerb4a6eaa2008-01-16 05:52:18 +0000950 assert(StackAdjustment.getOpcode() == ISD::Constant &&
951 "Expecting a const value");
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +0000952
Dan Gohman475871a2008-07-27 21:46:04 +0000953 SmallVector<SDValue,8> Operands;
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +0000954 Operands.push_back(Chain.getOperand(0));
955 Operands.push_back(TargetAddress);
956 Operands.push_back(StackAdjustment);
957 // Copy registers used by the call. Last operand is a flag so it is not
958 // copied.
Arnold Schwaighofer448175f2007-10-16 09:05:00 +0000959 for (unsigned i=3; i < TailCall.getNumOperands()-1; i++) {
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +0000960 Operands.push_back(Chain.getOperand(i));
961 }
Scott Michelfdc40a02009-02-17 22:15:04 +0000962 return DAG.getNode(X86ISD::TC_RETURN, dl, MVT::Other, &Operands[0],
Arnold Schwaighofer448175f2007-10-16 09:05:00 +0000963 Operands.size());
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +0000964 }
Scott Michelfdc40a02009-02-17 22:15:04 +0000965
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +0000966 // Regular return.
Dan Gohman475871a2008-07-27 21:46:04 +0000967 SDValue Flag;
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +0000968
Dan Gohman475871a2008-07-27 21:46:04 +0000969 SmallVector<SDValue, 6> RetOps;
Chris Lattner447ff682008-03-11 03:23:40 +0000970 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
971 // Operand #1 = Bytes To Pop
972 RetOps.push_back(DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));
Scott Michelfdc40a02009-02-17 22:15:04 +0000973
Chris Lattner2a9bdd72007-02-25 09:12:39 +0000974 // Copy the result values into the output registers.
Chris Lattner8e6da152008-03-10 21:08:41 +0000975 for (unsigned i = 0; i != RVLocs.size(); ++i) {
976 CCValAssign &VA = RVLocs[i];
977 assert(VA.isRegLoc() && "Can only return in registers!");
Dan Gohman475871a2008-07-27 21:46:04 +0000978 SDValue ValToCopy = Op.getOperand(i*2+1);
Scott Michelfdc40a02009-02-17 22:15:04 +0000979
Chris Lattner447ff682008-03-11 03:23:40 +0000980 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
981 // the RET instruction and handled by the FP Stackifier.
Dan Gohman37eed792009-02-04 17:28:58 +0000982 if (VA.getLocReg() == X86::ST0 ||
983 VA.getLocReg() == X86::ST1) {
Chris Lattner447ff682008-03-11 03:23:40 +0000984 // If this is a copy from an xmm register to ST(0), use an FPExtend to
985 // change the value to the FP stack register class.
Dan Gohman37eed792009-02-04 17:28:58 +0000986 if (isScalarFPTypeInSSEReg(VA.getValVT()))
Dale Johannesenace16102009-02-03 19:33:06 +0000987 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
Chris Lattner447ff682008-03-11 03:23:40 +0000988 RetOps.push_back(ValToCopy);
989 // Don't emit a copytoreg.
990 continue;
991 }
Dale Johannesena68f9012008-06-24 22:01:44 +0000992
Evan Cheng242b38b2009-02-23 09:03:22 +0000993 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
994 // which is returned in RAX / RDX.
Evan Cheng6140a8b2009-02-22 08:05:12 +0000995 if (Subtarget->is64Bit()) {
996 MVT ValVT = ValToCopy.getValueType();
Evan Cheng242b38b2009-02-23 09:03:22 +0000997 if (ValVT.isVector() && ValVT.getSizeInBits() == 64) {
Evan Cheng6140a8b2009-02-22 08:05:12 +0000998 ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, ValToCopy);
Evan Cheng242b38b2009-02-23 09:03:22 +0000999 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1)
1000 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, ValToCopy);
1001 }
Evan Cheng6140a8b2009-02-22 08:05:12 +00001002 }
1003
Dale Johannesendd64c412009-02-04 00:33:20 +00001004 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
Chris Lattner2a9bdd72007-02-25 09:12:39 +00001005 Flag = Chain.getValue(1);
1006 }
Dan Gohman61a92132008-04-21 23:59:07 +00001007
1008 // The x86-64 ABI for returning structs by value requires that we copy
1009 // the sret argument into %rax for the return. We saved the argument into
1010 // a virtual register in the entry block, so now we copy the value out
1011 // and into %rax.
1012 if (Subtarget->is64Bit() &&
1013 DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
1014 MachineFunction &MF = DAG.getMachineFunction();
1015 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
1016 unsigned Reg = FuncInfo->getSRetReturnReg();
1017 if (!Reg) {
1018 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
1019 FuncInfo->setSRetReturnReg(Reg);
1020 }
Dale Johannesendd64c412009-02-04 00:33:20 +00001021 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
Dan Gohman61a92132008-04-21 23:59:07 +00001022
Dale Johannesendd64c412009-02-04 00:33:20 +00001023 Chain = DAG.getCopyToReg(Chain, dl, X86::RAX, Val, Flag);
Dan Gohman61a92132008-04-21 23:59:07 +00001024 Flag = Chain.getValue(1);
1025 }
Scott Michelfdc40a02009-02-17 22:15:04 +00001026
Chris Lattner447ff682008-03-11 03:23:40 +00001027 RetOps[0] = Chain; // Update chain.
1028
1029 // Add the flag if we have it.
Gabor Greifba36cb52008-08-28 21:40:38 +00001030 if (Flag.getNode())
Chris Lattner447ff682008-03-11 03:23:40 +00001031 RetOps.push_back(Flag);
Scott Michelfdc40a02009-02-17 22:15:04 +00001032
1033 return DAG.getNode(X86ISD::RET_FLAG, dl,
Dale Johannesenace16102009-02-03 19:33:06 +00001034 MVT::Other, &RetOps[0], RetOps.size());
Chris Lattner2a9bdd72007-02-25 09:12:39 +00001035}
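
// A rough sketch of what the lowering above produces (assumed example, not
// verbatim dump output): for a 32-bit C-calling-convention function that
// returns an i32, the value is copied into EAX and the return pops nothing:
//
//   Chain = CopyToReg Chain, EAX, <result>, Flag
//   X86ISD::RET_FLAG Chain, 0:i16, Flag      // callee pops 0 bytes
//
// For an x86_stdcall function taking 8 bytes of arguments, the bytes-to-pop
// operand is 8 instead, so the callee cleans up its own arguments.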
1036
1037
Chris Lattner3085e152007-02-25 08:59:22 +00001038/// LowerCallResult - Lower the result values of an ISD::CALL into the
1039/// appropriate copies out of appropriate physical registers. This assumes that
1040/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
1041/// being lowered. This returns an SDNode with the same number of values as the
1042/// ISD::CALL.
1043SDNode *X86TargetLowering::
Scott Michelfdc40a02009-02-17 22:15:04 +00001044LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
Chris Lattner3085e152007-02-25 08:59:22 +00001045 unsigned CallingConv, SelectionDAG &DAG) {
Dale Johannesenace16102009-02-03 19:33:06 +00001046
Scott Michelfdc40a02009-02-17 22:15:04 +00001047 DebugLoc dl = TheCall->getDebugLoc();
Chris Lattnere32bbf62007-02-28 07:09:55 +00001048 // Assign locations to each value returned by this call.
Chris Lattner9774c912007-02-27 05:28:59 +00001049 SmallVector<CCValAssign, 16> RVLocs;
Dan Gohman095cc292008-09-13 01:54:27 +00001050 bool isVarArg = TheCall->isVarArg();
Torok Edwin3f142c32009-02-01 18:15:56 +00001051 bool Is64Bit = Subtarget->is64Bit();
Chris Lattner52387be2007-06-19 00:13:10 +00001052 CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
Chris Lattnere32bbf62007-02-28 07:09:55 +00001053 CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);
1054
Dan Gohman475871a2008-07-27 21:46:04 +00001055 SmallVector<SDValue, 8> ResultVals;
Scott Michelfdc40a02009-02-17 22:15:04 +00001056
Chris Lattner3085e152007-02-25 08:59:22 +00001057 // Copy all of the result registers out of their specified physreg.
Chris Lattner8e6da152008-03-10 21:08:41 +00001058 for (unsigned i = 0; i != RVLocs.size(); ++i) {
Dan Gohman37eed792009-02-04 17:28:58 +00001059 CCValAssign &VA = RVLocs[i];
1060 MVT CopyVT = VA.getValVT();
Scott Michelfdc40a02009-02-17 22:15:04 +00001061
Torok Edwin3f142c32009-02-01 18:15:56 +00001062 // If this is x86-64, and we disabled SSE, we can't return FP values
Scott Michelfdc40a02009-02-17 22:15:04 +00001063 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
Torok Edwin3f142c32009-02-01 18:15:56 +00001064 ((Is64Bit || TheCall->isInreg()) && !Subtarget->hasSSE1())) {
1065 cerr << "SSE register return with SSE disabled\n";
1066 exit(1);
1067 }
1068
Chris Lattner8e6da152008-03-10 21:08:41 +00001069 // If this is a call to a function that returns an fp value on the floating
1070 // point stack, but where we prefer to use the value in xmm registers, copy
1071 // it out as F80 and use a truncate to move it from fp stack reg to xmm reg.
Dan Gohman37eed792009-02-04 17:28:58 +00001072 if ((VA.getLocReg() == X86::ST0 ||
1073 VA.getLocReg() == X86::ST1) &&
1074 isScalarFPTypeInSSEReg(VA.getValVT())) {
Chris Lattner8e6da152008-03-10 21:08:41 +00001075 CopyVT = MVT::f80;
Chris Lattner3085e152007-02-25 08:59:22 +00001076 }
Scott Michelfdc40a02009-02-17 22:15:04 +00001077
Evan Cheng79fb3b42009-02-20 20:43:02 +00001078 SDValue Val;
1079 if (Is64Bit && CopyVT.isVector() && CopyVT.getSizeInBits() == 64) {
Evan Cheng242b38b2009-02-23 09:03:22 +00001080 // For x86-64, MMX values are returned in XMM0 / XMM1 except for v1i64.
1081 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
1082 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
1083 MVT::v2i64, InFlag).getValue(1);
1084 Val = Chain.getValue(0);
1085 Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
1086 Val, DAG.getConstant(0, MVT::i64));
1087 } else {
1088 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
1089 MVT::i64, InFlag).getValue(1);
1090 Val = Chain.getValue(0);
1091 }
Evan Cheng79fb3b42009-02-20 20:43:02 +00001092 Val = DAG.getNode(ISD::BIT_CONVERT, dl, CopyVT, Val);
1093 } else {
1094 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
1095 CopyVT, InFlag).getValue(1);
1096 Val = Chain.getValue(0);
1097 }
Chris Lattner8e6da152008-03-10 21:08:41 +00001098 InFlag = Chain.getValue(2);
Chris Lattner112dedc2007-12-29 06:41:28 +00001099
Dan Gohman37eed792009-02-04 17:28:58 +00001100 if (CopyVT != VA.getValVT()) {
Chris Lattner8e6da152008-03-10 21:08:41 +00001101 // Round the F80 the right size, which also moves to the appropriate xmm
1102 // register.
Dan Gohman37eed792009-02-04 17:28:58 +00001103 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
Chris Lattner8e6da152008-03-10 21:08:41 +00001104 // This truncation won't change the value.
1105 DAG.getIntPtrConstant(1));
1106 }
Scott Michelfdc40a02009-02-17 22:15:04 +00001107
Chris Lattner8e6da152008-03-10 21:08:41 +00001108 ResultVals.push_back(Val);
Chris Lattner3085e152007-02-25 08:59:22 +00001109 }
Duncan Sands4bdcb612008-07-02 17:40:58 +00001110
Chris Lattner3085e152007-02-25 08:59:22 +00001111 // Merge everything together with a MERGE_VALUES node.
1112 ResultVals.push_back(Chain);
Dale Johannesenace16102009-02-03 19:33:06 +00001113 return DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(),
1114 &ResultVals[0], ResultVals.size()).getNode();
Chris Lattner2b02a442007-02-25 08:29:00 +00001115}
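
// Illustrative example (assumed scenario): on x86-32 with SSE2, a call that
// returns 'double' comes back in ST(0), so the code above copies it out as
// f80 and then rounds it back down:
//
//   Val = CopyFromReg Chain, ST0, f80, InFlag
//   Val = fp_round Val, 1     // to f64; the '1' says the truncation is exact
//
// which lets the result live in an XMM register for the rest of the function.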
1116
1117
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00001118//===----------------------------------------------------------------------===//
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001119// C & StdCall & Fast Calling Convention implementation
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00001120//===----------------------------------------------------------------------===//
Anton Korobeynikovb10308e2007-01-28 13:31:35 +00001121// The StdCall calling convention is the standard for many Windows API
1122// routines. It differs from the C calling convention only slightly: the
1123// callee cleans up the stack instead of the caller, and symbols are
1124// decorated in a particular way. It does not support any vector arguments.
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001125// For info on fast calling convention see Fast Calling Convention (tail call)
1126// implementation LowerX86_32FastCCCallTo.
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00001127
Arnold Schwaighofer16a3e522008-02-26 17:50:59 +00001128/// CallIsStructReturn - Determines whether a CALL node uses struct return
1129/// semantics.
Dan Gohman095cc292008-09-13 01:54:27 +00001130static bool CallIsStructReturn(CallSDNode *TheCall) {
1131 unsigned NumOps = TheCall->getNumArgs();
Gordon Henriksen86737662008-01-05 16:56:59 +00001132 if (!NumOps)
1133 return false;
Duncan Sands276dcbd2008-03-21 09:14:45 +00001134
Dan Gohman095cc292008-09-13 01:54:27 +00001135 return TheCall->getArgFlags(0).isSRet();
Gordon Henriksen86737662008-01-05 16:56:59 +00001136}
1137
Arnold Schwaighofer16a3e522008-02-26 17:50:59 +00001138/// ArgsAreStructReturn - Determines whether a FORMAL_ARGUMENTS node uses struct
1139/// return semantics.
Dan Gohman475871a2008-07-27 21:46:04 +00001140static bool ArgsAreStructReturn(SDValue Op) {
Gabor Greifba36cb52008-08-28 21:40:38 +00001141 unsigned NumArgs = Op.getNode()->getNumValues() - 1;
Gordon Henriksen86737662008-01-05 16:56:59 +00001142 if (!NumArgs)
1143 return false;
Duncan Sands276dcbd2008-03-21 09:14:45 +00001144
1145 return cast<ARG_FLAGSSDNode>(Op.getOperand(3))->getArgFlags().isSRet();
Gordon Henriksen86737662008-01-05 16:56:59 +00001146}
1147
Arnold Schwaighofer4b5324a2008-04-12 18:11:06 +00001148/// IsCalleePop - Determines whether a CALL or FORMAL_ARGUMENTS node requires
1149/// the callee to pop its own arguments. Callee pop is necessary to support tail
Arnold Schwaighofer16a3e522008-02-26 17:50:59 +00001150/// calls.
Dan Gohman095cc292008-09-13 01:54:27 +00001151bool X86TargetLowering::IsCalleePop(bool IsVarArg, unsigned CallingConv) {
Gordon Henriksen86737662008-01-05 16:56:59 +00001152 if (IsVarArg)
1153 return false;
1154
Dan Gohman095cc292008-09-13 01:54:27 +00001155 switch (CallingConv) {
Gordon Henriksen86737662008-01-05 16:56:59 +00001156 default:
1157 return false;
1158 case CallingConv::X86_StdCall:
1159 return !Subtarget->is64Bit();
1160 case CallingConv::X86_FastCall:
1161 return !Subtarget->is64Bit();
1162 case CallingConv::Fast:
1163 return PerformTailCallOpt;
1164 }
1165}
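
// Quick reference for the rules above (illustrative calls; 'AnyCC' stands in
// for an arbitrary calling convention):
//
//   IsCalleePop(false, CallingConv::X86_StdCall)   // true on x86-32
//   IsCalleePop(false, CallingConv::X86_FastCall)  // true on x86-32
//   IsCalleePop(false, CallingConv::Fast)          // PerformTailCallOpt
//   IsCalleePop(true,  AnyCC)                      // false: vararg callers pop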
1166
Dan Gohman095cc292008-09-13 01:54:27 +00001167/// CCAssignFnForNode - Selects the correct CCAssignFn for the
1168/// given CallingConvention value.
1169CCAssignFn *X86TargetLowering::CCAssignFnForNode(unsigned CC) const {
Anton Korobeynikov7c1c2612008-02-20 11:22:39 +00001170 if (Subtarget->is64Bit()) {
Anton Korobeynikov1a979d92008-03-22 20:57:27 +00001171 if (Subtarget->isTargetWin64())
Anton Korobeynikov8f88cb02008-03-22 20:37:30 +00001172 return CC_X86_Win64_C;
Evan Chenge9ac9e62008-09-07 09:07:23 +00001173 else
1174 return CC_X86_64_C;
Anton Korobeynikov7c1c2612008-02-20 11:22:39 +00001175 }
1176
Gordon Henriksen86737662008-01-05 16:56:59 +00001177 if (CC == CallingConv::X86_FastCall)
1178 return CC_X86_32_FastCall;
Evan Chengb188dd92008-09-10 18:25:29 +00001179 else if (CC == CallingConv::Fast)
1180 return CC_X86_32_FastCC;
Gordon Henriksen86737662008-01-05 16:56:59 +00001181 else
1182 return CC_X86_32_C;
1183}
1184
Arnold Schwaighofer16a3e522008-02-26 17:50:59 +00001185/// NameDecorationForFORMAL_ARGUMENTS - Selects the appropriate decoration to
1186/// apply to a MachineFunction containing a given FORMAL_ARGUMENTS node.
Gordon Henriksen86737662008-01-05 16:56:59 +00001187NameDecorationStyle
Dan Gohman475871a2008-07-27 21:46:04 +00001188X86TargetLowering::NameDecorationForFORMAL_ARGUMENTS(SDValue Op) {
Dan Gohmanf5aeb1a2008-09-12 16:56:44 +00001189 unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
Gordon Henriksen86737662008-01-05 16:56:59 +00001190 if (CC == CallingConv::X86_FastCall)
1191 return FastCall;
1192 else if (CC == CallingConv::X86_StdCall)
1193 return StdCall;
1194 return None;
1195}
1196
Arnold Schwaighoferc8ab8cd2008-01-11 16:49:42 +00001197
Arnold Schwaighofer258bb1b2008-02-26 22:21:54 +00001198/// CallRequiresGOTPtrInReg - Check whether the call requires the GOT pointer
1199/// in a register before calling.
1200bool X86TargetLowering::CallRequiresGOTPtrInReg(bool Is64Bit, bool IsTailCall) {
1201 return !IsTailCall && !Is64Bit &&
1202 getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1203 Subtarget->isPICStyleGOT();
1204}
1205
Arnold Schwaighofer258bb1b2008-02-26 22:21:54 +00001206/// CallRequiresFnAddressInReg - Check whether the call requires the function
1207/// address to be loaded in a register.
Scott Michelfdc40a02009-02-17 22:15:04 +00001208bool
Arnold Schwaighofer258bb1b2008-02-26 22:21:54 +00001209X86TargetLowering::CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall) {
Scott Michelfdc40a02009-02-17 22:15:04 +00001210 return !Is64Bit && IsTailCall &&
Arnold Schwaighofer258bb1b2008-02-26 22:21:54 +00001211 getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1212 Subtarget->isPICStyleGOT();
1213}
1214
Arnold Schwaighofer16a3e522008-02-26 17:50:59 +00001215/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
1216/// specified by "Src" to the address "Dst", with size and alignment given by
Arnold Schwaighofer4b5324a2008-04-12 18:11:06 +00001217/// the specific parameter attribute. The copy will be passed as a byval
1218/// function parameter.
Scott Michelfdc40a02009-02-17 22:15:04 +00001219static SDValue
Dan Gohman475871a2008-07-27 21:46:04 +00001220CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
Dale Johannesendd64c412009-02-04 00:33:20 +00001221 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
1222 DebugLoc dl) {
Dan Gohman475871a2008-07-27 21:46:04 +00001223 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
Dale Johannesendd64c412009-02-04 00:33:20 +00001224 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
Arnold Schwaighofer30e62c02008-04-30 09:16:33 +00001225 /*AlwaysInline=*/true, NULL, 0, NULL, 0);
Arnold Schwaighoferc8ab8cd2008-01-11 16:49:42 +00001226}
1227
Dan Gohman475871a2008-07-27 21:46:04 +00001228SDValue X86TargetLowering::LowerMemArgument(SDValue Op, SelectionDAG &DAG,
Rafael Espindola7effac52007-09-14 15:48:13 +00001229 const CCValAssign &VA,
1230 MachineFrameInfo *MFI,
Arnold Schwaighofer865c6812008-02-26 09:19:59 +00001231 unsigned CC,
Dan Gohman475871a2008-07-27 21:46:04 +00001232 SDValue Root, unsigned i) {
Rafael Espindola7effac52007-09-14 15:48:13 +00001233 // Create the nodes corresponding to a load from this parameter slot.
Duncan Sands276dcbd2008-03-21 09:14:45 +00001234 ISD::ArgFlagsTy Flags =
1235 cast<ARG_FLAGSSDNode>(Op.getOperand(3 + i))->getArgFlags();
Arnold Schwaighofer865c6812008-02-26 09:19:59 +00001236 bool AlwaysUseMutable = (CC==CallingConv::Fast) && PerformTailCallOpt;
Duncan Sands276dcbd2008-03-21 09:14:45 +00001237 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
Evan Chenge70bb592008-01-10 02:24:25 +00001238
Arnold Schwaighofer865c6812008-02-26 09:19:59 +00001239 // FIXME: For now, all byval parameter objects are marked mutable. This can be
Scott Michelfdc40a02009-02-17 22:15:04 +00001240 // changed with more analysis.
Arnold Schwaighofer865c6812008-02-26 09:19:59 +00001241  // In case of tail call optimization, mark all arguments mutable, since they
1242  // could be overwritten when the arguments of a tail call are lowered.
Duncan Sands83ec4b62008-06-06 12:08:01 +00001243 int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8,
Arnold Schwaighofer865c6812008-02-26 09:19:59 +00001244 VA.getLocMemOffset(), isImmutable);
Dan Gohman475871a2008-07-27 21:46:04 +00001245 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
Duncan Sands276dcbd2008-03-21 09:14:45 +00001246 if (Flags.isByVal())
Rafael Espindola7effac52007-09-14 15:48:13 +00001247 return FIN;
Dale Johannesen6f38cb62009-02-07 19:59:05 +00001248 return DAG.getLoad(VA.getValVT(), Op.getDebugLoc(), Root, FIN,
Dan Gohmana54cf172008-07-11 22:44:52 +00001249 PseudoSourceValue::getFixedStack(FI), 0);
Rafael Espindola7effac52007-09-14 15:48:13 +00001250}
1251
Dan Gohman475871a2008-07-27 21:46:04 +00001252SDValue
1253X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
Evan Cheng1bc78042006-04-26 01:20:17 +00001254 MachineFunction &MF = DAG.getMachineFunction();
Gordon Henriksen86737662008-01-05 16:56:59 +00001255 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
Dale Johannesen6f38cb62009-02-07 19:59:05 +00001256 DebugLoc dl = Op.getDebugLoc();
Scott Michelfdc40a02009-02-17 22:15:04 +00001257
Gordon Henriksen86737662008-01-05 16:56:59 +00001258 const Function* Fn = MF.getFunction();
1259 if (Fn->hasExternalLinkage() &&
1260 Subtarget->isTargetCygMing() &&
1261 Fn->getName() == "main")
1262 FuncInfo->setForceFramePointer(true);
1263
1264 // Decorate the function name.
1265 FuncInfo->setDecorationStyle(NameDecorationForFORMAL_ARGUMENTS(Op));
Scott Michelfdc40a02009-02-17 22:15:04 +00001266
Evan Cheng1bc78042006-04-26 01:20:17 +00001267 MachineFrameInfo *MFI = MF.getFrameInfo();
Dan Gohman475871a2008-07-27 21:46:04 +00001268 SDValue Root = Op.getOperand(0);
Dan Gohmanf5aeb1a2008-09-12 16:56:44 +00001269 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001270 unsigned CC = MF.getFunction()->getCallingConv();
Gordon Henriksen86737662008-01-05 16:56:59 +00001271 bool Is64Bit = Subtarget->is64Bit();
Anton Korobeynikov998a5bc2008-04-27 23:15:03 +00001272 bool IsWin64 = Subtarget->isTargetWin64();
Gordon Henriksenae636f82008-01-03 16:47:34 +00001273
1274 assert(!(isVarArg && CC == CallingConv::Fast) &&
1275 "Var args not supported with calling convention fastcc");
1276
Chris Lattner638402b2007-02-28 07:00:42 +00001277 // Assign locations to all of the incoming arguments.
Chris Lattnerf39f7712007-02-28 05:46:49 +00001278 SmallVector<CCValAssign, 16> ArgLocs;
Gordon Henriksenae636f82008-01-03 16:47:34 +00001279 CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
Dan Gohman095cc292008-09-13 01:54:27 +00001280 CCInfo.AnalyzeFormalArguments(Op.getNode(), CCAssignFnForNode(CC));
Scott Michelfdc40a02009-02-17 22:15:04 +00001281
Dan Gohman475871a2008-07-27 21:46:04 +00001282 SmallVector<SDValue, 8> ArgValues;
Chris Lattnerf39f7712007-02-28 05:46:49 +00001283 unsigned LastVal = ~0U;
1284 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1285 CCValAssign &VA = ArgLocs[i];
1286 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
1287 // places.
1288 assert(VA.getValNo() != LastVal &&
1289 "Don't support value assigned to multiple locs yet");
1290 LastVal = VA.getValNo();
Scott Michelfdc40a02009-02-17 22:15:04 +00001291
Chris Lattnerf39f7712007-02-28 05:46:49 +00001292 if (VA.isRegLoc()) {
Duncan Sands83ec4b62008-06-06 12:08:01 +00001293 MVT RegVT = VA.getLocVT();
Devang Patel8a84e442009-01-05 17:31:22 +00001294 TargetRegisterClass *RC = NULL;
Chris Lattnerf39f7712007-02-28 05:46:49 +00001295 if (RegVT == MVT::i32)
1296 RC = X86::GR32RegisterClass;
Gordon Henriksen86737662008-01-05 16:56:59 +00001297 else if (Is64Bit && RegVT == MVT::i64)
1298 RC = X86::GR64RegisterClass;
Dale Johannesene672af12008-02-05 20:46:33 +00001299 else if (RegVT == MVT::f32)
Gordon Henriksen86737662008-01-05 16:56:59 +00001300 RC = X86::FR32RegisterClass;
Dale Johannesene672af12008-02-05 20:46:33 +00001301 else if (RegVT == MVT::f64)
Gordon Henriksen86737662008-01-05 16:56:59 +00001302 RC = X86::FR64RegisterClass;
Duncan Sands83ec4b62008-06-06 12:08:01 +00001303 else if (RegVT.isVector() && RegVT.getSizeInBits() == 128)
Evan Chengee472b12008-04-25 07:56:45 +00001304 RC = X86::VR128RegisterClass;
Duncan Sands83ec4b62008-06-06 12:08:01 +00001305 else if (RegVT.isVector()) {
1306 assert(RegVT.getSizeInBits() == 64);
Evan Chengee472b12008-04-25 07:56:45 +00001307 if (!Is64Bit)
1308 RC = X86::VR64RegisterClass; // MMX values are passed in MMXs.
1309 else {
1310 // Darwin calling convention passes MMX values in either GPRs or
1311 // XMMs in x86-64. Other targets pass them in memory.
1312 if (RegVT != MVT::v1i64 && Subtarget->hasSSE2()) {
1313 RC = X86::VR128RegisterClass; // MMX values are passed in XMMs.
1314 RegVT = MVT::v2i64;
1315 } else {
1316 RC = X86::GR64RegisterClass; // v1i64 values are passed in GPRs.
1317 RegVT = MVT::i64;
1318 }
1319 }
1320 } else {
1321 assert(0 && "Unknown argument type!");
Anton Korobeynikovb10308e2007-01-28 13:31:35 +00001322 }
Gordon Henriksenae636f82008-01-03 16:47:34 +00001323
Bob Wilson998e1252009-04-20 18:36:57 +00001324 unsigned Reg = DAG.getMachineFunction().addLiveIn(VA.getLocReg(), RC);
Dale Johannesendd64c412009-02-04 00:33:20 +00001325 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, RegVT);
Scott Michelfdc40a02009-02-17 22:15:04 +00001326
Chris Lattnerf39f7712007-02-28 05:46:49 +00001327 // If this is an 8 or 16-bit value, it is really passed promoted to 32
1328 // bits. Insert an assert[sz]ext to capture this, then truncate to the
1329 // right size.
1330 if (VA.getLocInfo() == CCValAssign::SExt)
Dale Johannesenace16102009-02-03 19:33:06 +00001331 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
Chris Lattnerf39f7712007-02-28 05:46:49 +00001332 DAG.getValueType(VA.getValVT()));
1333 else if (VA.getLocInfo() == CCValAssign::ZExt)
Dale Johannesenace16102009-02-03 19:33:06 +00001334 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
Chris Lattnerf39f7712007-02-28 05:46:49 +00001335 DAG.getValueType(VA.getValVT()));
Scott Michelfdc40a02009-02-17 22:15:04 +00001336
Chris Lattnerf39f7712007-02-28 05:46:49 +00001337 if (VA.getLocInfo() != CCValAssign::Full)
Dale Johannesenace16102009-02-03 19:33:06 +00001338 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
Scott Michelfdc40a02009-02-17 22:15:04 +00001339
Gordon Henriksen86737662008-01-05 16:56:59 +00001340 // Handle MMX values passed in GPRs.
Evan Cheng44c0fd12008-04-25 20:13:28 +00001341 if (Is64Bit && RegVT != VA.getLocVT()) {
Duncan Sands83ec4b62008-06-06 12:08:01 +00001342 if (RegVT.getSizeInBits() == 64 && RC == X86::GR64RegisterClass)
Dale Johannesenace16102009-02-03 19:33:06 +00001343 ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), ArgValue);
Evan Cheng44c0fd12008-04-25 20:13:28 +00001344 else if (RC == X86::VR128RegisterClass) {
Dale Johannesenace16102009-02-03 19:33:06 +00001345 ArgValue = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
1346 ArgValue, DAG.getConstant(0, MVT::i64));
1347 ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), ArgValue);
Evan Cheng44c0fd12008-04-25 20:13:28 +00001348 }
1349 }
Scott Michelfdc40a02009-02-17 22:15:04 +00001350
Chris Lattnerf39f7712007-02-28 05:46:49 +00001351 ArgValues.push_back(ArgValue);
1352 } else {
1353 assert(VA.isMemLoc());
Arnold Schwaighofer865c6812008-02-26 09:19:59 +00001354 ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, CC, Root, i));
Evan Cheng1bc78042006-04-26 01:20:17 +00001355 }
Evan Cheng1bc78042006-04-26 01:20:17 +00001356 }
Gordon Henriksenae636f82008-01-03 16:47:34 +00001357
Dan Gohman61a92132008-04-21 23:59:07 +00001358 // The x86-64 ABI for returning structs by value requires that we copy
1359 // the sret argument into %rax for the return. Save the argument into
1360 // a virtual register so that we can access it from the return points.
1361 if (Is64Bit && DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
1362 MachineFunction &MF = DAG.getMachineFunction();
1363 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
1364 unsigned Reg = FuncInfo->getSRetReturnReg();
1365 if (!Reg) {
1366 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
1367 FuncInfo->setSRetReturnReg(Reg);
1368 }
Dale Johannesendd64c412009-02-04 00:33:20 +00001369 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, ArgValues[0]);
Dale Johannesenace16102009-02-03 19:33:06 +00001370 Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Root);
Dan Gohman61a92132008-04-21 23:59:07 +00001371 }
1372
Chris Lattnerf39f7712007-02-28 05:46:49 +00001373 unsigned StackSize = CCInfo.getNextStackOffset();
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001374  // Align the stack specially for tail calls.
Evan Chenge9ac9e62008-09-07 09:07:23 +00001375 if (PerformTailCallOpt && CC == CallingConv::Fast)
Gordon Henriksenae636f82008-01-03 16:47:34 +00001376 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
Evan Cheng25caf632006-05-23 21:06:34 +00001377
Evan Cheng1bc78042006-04-26 01:20:17 +00001378 // If the function takes variable number of arguments, make a frame index for
1379 // the start of the first vararg value... for expansion of llvm.va_start.
Gordon Henriksenae636f82008-01-03 16:47:34 +00001380 if (isVarArg) {
Gordon Henriksen86737662008-01-05 16:56:59 +00001381 if (Is64Bit || CC != CallingConv::X86_FastCall) {
1382 VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
1383 }
1384 if (Is64Bit) {
Anton Korobeynikov998a5bc2008-04-27 23:15:03 +00001385 unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0;
1386
1387 // FIXME: We should really autogenerate these arrays
1388 static const unsigned GPR64ArgRegsWin64[] = {
1389 X86::RCX, X86::RDX, X86::R8, X86::R9
Gordon Henriksen86737662008-01-05 16:56:59 +00001390 };
Anton Korobeynikov998a5bc2008-04-27 23:15:03 +00001391 static const unsigned XMMArgRegsWin64[] = {
1392 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
1393 };
1394 static const unsigned GPR64ArgRegs64Bit[] = {
1395 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
1396 };
1397 static const unsigned XMMArgRegs64Bit[] = {
Gordon Henriksen86737662008-01-05 16:56:59 +00001398 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
1399 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
1400 };
Anton Korobeynikov998a5bc2008-04-27 23:15:03 +00001401 const unsigned *GPR64ArgRegs, *XMMArgRegs;
1402
1403 if (IsWin64) {
1404 TotalNumIntRegs = 4; TotalNumXMMRegs = 4;
1405 GPR64ArgRegs = GPR64ArgRegsWin64;
1406 XMMArgRegs = XMMArgRegsWin64;
1407 } else {
1408 TotalNumIntRegs = 6; TotalNumXMMRegs = 8;
1409 GPR64ArgRegs = GPR64ArgRegs64Bit;
1410 XMMArgRegs = XMMArgRegs64Bit;
1411 }
1412 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs,
1413 TotalNumIntRegs);
1414 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs,
1415 TotalNumXMMRegs);
1416
Devang Patel578efa92009-06-05 21:57:13 +00001417 bool NoImplicitFloatOps = Fn->hasFnAttr(Attribute::NoImplicitFloat);
Evan Chengc7ce29b2009-02-13 22:36:38 +00001418 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
Torok Edwin3f142c32009-02-01 18:15:56 +00001419 "SSE register cannot be used when SSE is disabled!");
Devang Patel578efa92009-06-05 21:57:13 +00001420 assert(!(NumXMMRegs && UseSoftFloat && NoImplicitFloatOps) &&
Evan Chengc7ce29b2009-02-13 22:36:38 +00001421 "SSE register cannot be used when SSE is disabled!");
Devang Patel578efa92009-06-05 21:57:13 +00001422 if (UseSoftFloat || NoImplicitFloatOps || !Subtarget->hasSSE1())
Torok Edwin3f142c32009-02-01 18:15:56 +00001423 // Kernel mode asks for SSE to be disabled, so don't push them
1424 // on the stack.
1425 TotalNumXMMRegs = 0;
Bill Wendlingf9abd7e2009-03-11 22:30:01 +00001426
Gordon Henriksen86737662008-01-05 16:56:59 +00001427 // For X86-64, if there are vararg parameters that are passed via
1428 // registers, then we must store them to their spots on the stack so they
1429    // may be loaded by dereferencing the result of va_next.
1430 VarArgsGPOffset = NumIntRegs * 8;
Anton Korobeynikov998a5bc2008-04-27 23:15:03 +00001431 VarArgsFPOffset = TotalNumIntRegs * 8 + NumXMMRegs * 16;
1432 RegSaveFrameIndex = MFI->CreateStackObject(TotalNumIntRegs * 8 +
1433 TotalNumXMMRegs * 16, 16);
1434
Gordon Henriksen86737662008-01-05 16:56:59 +00001435 // Store the integer parameter registers.
Dan Gohman475871a2008-07-27 21:46:04 +00001436 SmallVector<SDValue, 8> MemOps;
1437 SDValue RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
Dale Johannesenace16102009-02-03 19:33:06 +00001438 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
Chris Lattner0bd48932008-01-17 07:00:52 +00001439 DAG.getIntPtrConstant(VarArgsGPOffset));
Anton Korobeynikov998a5bc2008-04-27 23:15:03 +00001440 for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) {
Bob Wilson998e1252009-04-20 18:36:57 +00001441 unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs],
1442 X86::GR64RegisterClass);
Dale Johannesendd64c412009-02-04 00:33:20 +00001443 SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::i64);
Dan Gohman475871a2008-07-27 21:46:04 +00001444 SDValue Store =
Dale Johannesenace16102009-02-03 19:33:06 +00001445 DAG.getStore(Val.getValue(1), dl, Val, FIN,
Dan Gohmana54cf172008-07-11 22:44:52 +00001446 PseudoSourceValue::getFixedStack(RegSaveFrameIndex), 0);
Gordon Henriksen86737662008-01-05 16:56:59 +00001447 MemOps.push_back(Store);
Dale Johannesenace16102009-02-03 19:33:06 +00001448 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
Chris Lattner0bd48932008-01-17 07:00:52 +00001449 DAG.getIntPtrConstant(8));
Gordon Henriksen86737662008-01-05 16:56:59 +00001450 }
Anton Korobeynikov998a5bc2008-04-27 23:15:03 +00001451
Gordon Henriksen86737662008-01-05 16:56:59 +00001452 // Now store the XMM (fp + vector) parameter registers.
Dale Johannesenace16102009-02-03 19:33:06 +00001453 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
Chris Lattner0bd48932008-01-17 07:00:52 +00001454 DAG.getIntPtrConstant(VarArgsFPOffset));
Anton Korobeynikov998a5bc2008-04-27 23:15:03 +00001455 for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) {
Bob Wilson998e1252009-04-20 18:36:57 +00001456 unsigned VReg = MF.addLiveIn(XMMArgRegs[NumXMMRegs],
1457 X86::VR128RegisterClass);
Dale Johannesendd64c412009-02-04 00:33:20 +00001458 SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::v4f32);
Dan Gohman475871a2008-07-27 21:46:04 +00001459 SDValue Store =
Dale Johannesenace16102009-02-03 19:33:06 +00001460 DAG.getStore(Val.getValue(1), dl, Val, FIN,
Dan Gohmana54cf172008-07-11 22:44:52 +00001461 PseudoSourceValue::getFixedStack(RegSaveFrameIndex), 0);
Gordon Henriksen86737662008-01-05 16:56:59 +00001462 MemOps.push_back(Store);
Dale Johannesenace16102009-02-03 19:33:06 +00001463 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
Chris Lattner0bd48932008-01-17 07:00:52 +00001464 DAG.getIntPtrConstant(16));
Gordon Henriksen86737662008-01-05 16:56:59 +00001465 }
1466 if (!MemOps.empty())
Dale Johannesenace16102009-02-03 19:33:06 +00001467 Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
Gordon Henriksen86737662008-01-05 16:56:59 +00001468 &MemOps[0], MemOps.size());
1469 }
Gordon Henriksenae636f82008-01-03 16:47:34 +00001470 }
Scott Michelfdc40a02009-02-17 22:15:04 +00001471
Gordon Henriksenae636f82008-01-03 16:47:34 +00001472 ArgValues.push_back(Root);
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001473
Gordon Henriksen86737662008-01-05 16:56:59 +00001474 // Some CCs need callee pop.
Dan Gohman095cc292008-09-13 01:54:27 +00001475 if (IsCalleePop(isVarArg, CC)) {
Gordon Henriksen86737662008-01-05 16:56:59 +00001476 BytesToPopOnReturn = StackSize; // Callee pops everything.
Anton Korobeynikovb10308e2007-01-28 13:31:35 +00001477 BytesCallerReserves = 0;
1478 } else {
Anton Korobeynikov1d9bacc2007-03-06 08:12:33 +00001479 BytesToPopOnReturn = 0; // Callee pops nothing.
Chris Lattnerf39f7712007-02-28 05:46:49 +00001480 // If this is an sret function, the return should pop the hidden pointer.
Evan Chengb188dd92008-09-10 18:25:29 +00001481 if (!Is64Bit && CC != CallingConv::Fast && ArgsAreStructReturn(Op))
Scott Michelfdc40a02009-02-17 22:15:04 +00001482 BytesToPopOnReturn = 4;
Chris Lattnerf39f7712007-02-28 05:46:49 +00001483 BytesCallerReserves = StackSize;
Anton Korobeynikovb10308e2007-01-28 13:31:35 +00001484 }
Gordon Henriksenae636f82008-01-03 16:47:34 +00001485
Gordon Henriksen86737662008-01-05 16:56:59 +00001486 if (!Is64Bit) {
1487 RegSaveFrameIndex = 0xAAAAAAA; // RegSaveFrameIndex is X86-64 only.
1488 if (CC == CallingConv::X86_FastCall)
1489 VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
1490 }
Evan Cheng25caf632006-05-23 21:06:34 +00001491
Anton Korobeynikova2780e12007-08-15 17:12:32 +00001492 FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);
Evan Cheng1bc78042006-04-26 01:20:17 +00001493
Evan Cheng25caf632006-05-23 21:06:34 +00001494 // Return the new list of results.
Dale Johannesenace16102009-02-03 19:33:06 +00001495 return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
Duncan Sandsaaffa052008-12-01 11:41:29 +00001496 &ArgValues[0], ArgValues.size()).getValue(Op.getResNo());
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00001497}
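
// Worked example of the x86-64 vararg register save area built above
// (illustrative numbers, SysV ABI rather than Win64): with 6 integer and
// 8 XMM registers the area is 6*8 + 8*16 = 176 bytes, 16-byte aligned. For
// a function like
//
//   int f(const char *fmt, ...);   // assumed example
//
// one integer register (RDI) is consumed by 'fmt', so VarArgsGPOffset starts
// at 8 and VarArgsFPOffset at 48; va_arg then consumes the remaining GPR and
// XMM slots before falling back to the overflow area on the stack.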
1498
Dan Gohman475871a2008-07-27 21:46:04 +00001499SDValue
Dan Gohman095cc292008-09-13 01:54:27 +00001500X86TargetLowering::LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG,
Dan Gohman475871a2008-07-27 21:46:04 +00001501 const SDValue &StackPtr,
Evan Chengdffbd832008-01-10 00:09:10 +00001502 const CCValAssign &VA,
Dan Gohman475871a2008-07-27 21:46:04 +00001503 SDValue Chain,
Dan Gohman095cc292008-09-13 01:54:27 +00001504 SDValue Arg, ISD::ArgFlagsTy Flags) {
Dale Johannesenace16102009-02-03 19:33:06 +00001505 DebugLoc dl = TheCall->getDebugLoc();
Dan Gohman4fdad172008-02-07 16:28:05 +00001506 unsigned LocMemOffset = VA.getLocMemOffset();
Dan Gohman475871a2008-07-27 21:46:04 +00001507 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
Dale Johannesenace16102009-02-03 19:33:06 +00001508 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
Duncan Sands276dcbd2008-03-21 09:14:45 +00001509 if (Flags.isByVal()) {
Dale Johannesendd64c412009-02-04 00:33:20 +00001510 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
Evan Chengdffbd832008-01-10 00:09:10 +00001511 }
Dale Johannesenace16102009-02-03 19:33:06 +00001512 return DAG.getStore(Chain, dl, Arg, PtrOff,
Dan Gohman3069b872008-02-07 18:41:25 +00001513 PseudoSourceValue::getStack(), LocMemOffset);
Evan Chengdffbd832008-01-10 00:09:10 +00001514}
1515
Bill Wendling64e87322009-01-16 19:25:27 +00001516/// EmitTailCallLoadRetAddr - Emit a load of the return address if tail call
Arnold Schwaighofer4b5324a2008-04-12 18:11:06 +00001517/// optimization is performed and it is required.
Scott Michelfdc40a02009-02-17 22:15:04 +00001518SDValue
1519X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
Dan Gohman475871a2008-07-27 21:46:04 +00001520 SDValue &OutRetAddr,
Scott Michelfdc40a02009-02-17 22:15:04 +00001521 SDValue Chain,
1522 bool IsTailCall,
1523 bool Is64Bit,
Dale Johannesenace16102009-02-03 19:33:06 +00001524 int FPDiff,
1525 DebugLoc dl) {
Arnold Schwaighofer4b5324a2008-04-12 18:11:06 +00001526 if (!IsTailCall || FPDiff==0) return Chain;
1527
1528 // Adjust the Return address stack slot.
Duncan Sands83ec4b62008-06-06 12:08:01 +00001529 MVT VT = getPointerTy();
Arnold Schwaighofer4b5324a2008-04-12 18:11:06 +00001530 OutRetAddr = getReturnAddressFrameIndex(DAG);
Bill Wendling64e87322009-01-16 19:25:27 +00001531
Arnold Schwaighofer4b5324a2008-04-12 18:11:06 +00001532 // Load the "old" Return address.
Dale Johannesenace16102009-02-03 19:33:06 +00001533 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, NULL, 0);
Gabor Greifba36cb52008-08-28 21:40:38 +00001534 return SDValue(OutRetAddr.getNode(), 1);
Arnold Schwaighofer4b5324a2008-04-12 18:11:06 +00001535}
1536
1537/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call
1538/// optimization is performed and it is required (FPDiff!=0).
Scott Michelfdc40a02009-02-17 22:15:04 +00001539static SDValue
1540EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF,
Dan Gohman475871a2008-07-27 21:46:04 +00001541 SDValue Chain, SDValue RetAddrFrIdx,
Dale Johannesenace16102009-02-03 19:33:06 +00001542 bool Is64Bit, int FPDiff, DebugLoc dl) {
Arnold Schwaighofer4b5324a2008-04-12 18:11:06 +00001543 // Store the return address to the appropriate stack slot.
1544 if (!FPDiff) return Chain;
1545 // Calculate the new stack slot for the return address.
1546 int SlotSize = Is64Bit ? 8 : 4;
Scott Michelfdc40a02009-02-17 22:15:04 +00001547 int NewReturnAddrFI =
Arnold Schwaighofer4b5324a2008-04-12 18:11:06 +00001548 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
Duncan Sands83ec4b62008-06-06 12:08:01 +00001549 MVT VT = Is64Bit ? MVT::i64 : MVT::i32;
Dan Gohman475871a2008-07-27 21:46:04 +00001550 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
Scott Michelfdc40a02009-02-17 22:15:04 +00001551 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
Dan Gohmana54cf172008-07-11 22:44:52 +00001552 PseudoSourceValue::getFixedStack(NewReturnAddrFI), 0);
Arnold Schwaighofer4b5324a2008-04-12 18:11:06 +00001553 return Chain;
1554}
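
// A small numeric sketch of the FPDiff bookkeeping used by the two helpers
// above (assumed example): if the caller reserved 32 bytes of argument space
// and the tail callee needs only 16, then FPDiff = 32 - 16 = 16. On x86-64
// (SlotSize == 8) the return address is reloaded from its old slot and
// re-stored into a fixed object at offset FPDiff - SlotSize = 8, so the slot
// moves together with the shrunken argument area.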
1555
Dan Gohman475871a2008-07-27 21:46:04 +00001556SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
Gordon Henriksen86737662008-01-05 16:56:59 +00001557 MachineFunction &MF = DAG.getMachineFunction();
Dan Gohman095cc292008-09-13 01:54:27 +00001558 CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
1559 SDValue Chain = TheCall->getChain();
1560 unsigned CC = TheCall->getCallingConv();
1561 bool isVarArg = TheCall->isVarArg();
1562 bool IsTailCall = TheCall->isTailCall() &&
1563 CC == CallingConv::Fast && PerformTailCallOpt;
1564 SDValue Callee = TheCall->getCallee();
Gordon Henriksen86737662008-01-05 16:56:59 +00001565 bool Is64Bit = Subtarget->is64Bit();
Dan Gohman095cc292008-09-13 01:54:27 +00001566 bool IsStructRet = CallIsStructReturn(TheCall);
Dale Johannesenace16102009-02-03 19:33:06 +00001567 DebugLoc dl = TheCall->getDebugLoc();
Gordon Henriksenae636f82008-01-03 16:47:34 +00001568
1569 assert(!(isVarArg && CC == CallingConv::Fast) &&
1570 "Var args not supported with calling convention fastcc");
1571
Chris Lattner638402b2007-02-28 07:00:42 +00001572 // Analyze operands of the call, assigning locations to each operand.
Chris Lattner423c5f42007-02-28 05:31:48 +00001573 SmallVector<CCValAssign, 16> ArgLocs;
Chris Lattner52387be2007-06-19 00:13:10 +00001574 CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
Dan Gohman095cc292008-09-13 01:54:27 +00001575 CCInfo.AnalyzeCallOperands(TheCall, CCAssignFnForNode(CC));
Scott Michelfdc40a02009-02-17 22:15:04 +00001576
Chris Lattner423c5f42007-02-28 05:31:48 +00001577 // Get a count of how many bytes are to be pushed on the stack.
1578 unsigned NumBytes = CCInfo.getNextStackOffset();
Arnold Schwaighofer1fdc40f2008-09-11 20:28:43 +00001579 if (PerformTailCallOpt && CC == CallingConv::Fast)
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001580 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00001581
Gordon Henriksen86737662008-01-05 16:56:59 +00001582 int FPDiff = 0;
1583 if (IsTailCall) {
1584 // Lower arguments at fp - stackoffset + fpdiff.
Scott Michelfdc40a02009-02-17 22:15:04 +00001585 unsigned NumBytesCallerPushed =
Gordon Henriksen86737662008-01-05 16:56:59 +00001586 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
1587 FPDiff = NumBytesCallerPushed - NumBytes;
1588
1589    // Set the delta of movement of the return address stack slot,
1590    // but only if the new delta is smaller than the previously recorded one.
1591 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
1592 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);
1593 }
1594
Chris Lattnere563bbc2008-10-11 22:08:30 +00001595 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00001596
Dan Gohman475871a2008-07-27 21:46:04 +00001597 SDValue RetAddrFrIdx;
Arnold Schwaighofer4b5324a2008-04-12 18:11:06 +00001598  // Load the return address for tail calls.
1599 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, IsTailCall, Is64Bit,
Dale Johannesenace16102009-02-03 19:33:06 +00001600 FPDiff, dl);
Gordon Henriksen86737662008-01-05 16:56:59 +00001601
Dan Gohman475871a2008-07-27 21:46:04 +00001602 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
1603 SmallVector<SDValue, 8> MemOpChains;
1604 SDValue StackPtr;
Chris Lattner423c5f42007-02-28 05:31:48 +00001605
Arnold Schwaighofer30e62c02008-04-30 09:16:33 +00001606 // Walk the register/memloc assignments, inserting copies/loads. In the case
1607  // of tail call optimization, arguments are handled later.
Chris Lattner423c5f42007-02-28 05:31:48 +00001608 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1609 CCValAssign &VA = ArgLocs[i];
Dan Gohman095cc292008-09-13 01:54:27 +00001610 SDValue Arg = TheCall->getArg(i);
1611 ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
1612 bool isByVal = Flags.isByVal();
Scott Michelfdc40a02009-02-17 22:15:04 +00001613
Chris Lattner423c5f42007-02-28 05:31:48 +00001614 // Promote the value if needed.
1615 switch (VA.getLocInfo()) {
1616 default: assert(0 && "Unknown loc info!");
1617 case CCValAssign::Full: break;
1618 case CCValAssign::SExt:
Dale Johannesenace16102009-02-03 19:33:06 +00001619 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
Chris Lattner423c5f42007-02-28 05:31:48 +00001620 break;
1621 case CCValAssign::ZExt:
Dale Johannesenace16102009-02-03 19:33:06 +00001622 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
Chris Lattner423c5f42007-02-28 05:31:48 +00001623 break;
1624 case CCValAssign::AExt:
Dale Johannesenace16102009-02-03 19:33:06 +00001625 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
Chris Lattner423c5f42007-02-28 05:31:48 +00001626 break;
Evan Cheng6b5783d2006-05-25 18:56:34 +00001627 }
Scott Michelfdc40a02009-02-17 22:15:04 +00001628
Chris Lattner423c5f42007-02-28 05:31:48 +00001629 if (VA.isRegLoc()) {
Evan Cheng10e86422008-04-25 19:11:04 +00001630 if (Is64Bit) {
Duncan Sands83ec4b62008-06-06 12:08:01 +00001631 MVT RegVT = VA.getLocVT();
1632 if (RegVT.isVector() && RegVT.getSizeInBits() == 64)
Evan Cheng10e86422008-04-25 19:11:04 +00001633 switch (VA.getLocReg()) {
1634 default:
1635 break;
1636 case X86::RDI: case X86::RSI: case X86::RDX: case X86::RCX:
1637 case X86::R8: {
1638 // Special case: passing MMX values in GPR registers.
Dale Johannesenace16102009-02-03 19:33:06 +00001639 Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg);
Evan Cheng10e86422008-04-25 19:11:04 +00001640 break;
1641 }
1642 case X86::XMM0: case X86::XMM1: case X86::XMM2: case X86::XMM3:
1643 case X86::XMM4: case X86::XMM5: case X86::XMM6: case X86::XMM7: {
1644 // Special case: passing MMX values in XMM registers.
Dale Johannesenace16102009-02-03 19:33:06 +00001645 Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg);
1646 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
Nate Begeman9008ca62009-04-27 18:41:29 +00001647 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
Evan Cheng10e86422008-04-25 19:11:04 +00001648 break;
1649 }
1650 }
1651 }
Chris Lattner423c5f42007-02-28 05:31:48 +00001652 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1653 } else {
Arnold Schwaighofer4b5324a2008-04-12 18:11:06 +00001654 if (!IsTailCall || (IsTailCall && isByVal)) {
Arnold Schwaighoferc8ab8cd2008-01-11 16:49:42 +00001655 assert(VA.isMemLoc());
Gabor Greifba36cb52008-08-28 21:40:38 +00001656 if (StackPtr.getNode() == 0)
Dale Johannesendd64c412009-02-04 00:33:20 +00001657 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy());
Scott Michelfdc40a02009-02-17 22:15:04 +00001658
Dan Gohman095cc292008-09-13 01:54:27 +00001659 MemOpChains.push_back(LowerMemOpCallTo(TheCall, DAG, StackPtr, VA,
1660 Chain, Arg, Flags));
Arnold Schwaighoferc8ab8cd2008-01-11 16:49:42 +00001661 }
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00001662 }
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00001663 }
Scott Michelfdc40a02009-02-17 22:15:04 +00001664
Evan Cheng32fe1032006-05-25 00:59:30 +00001665 if (!MemOpChains.empty())
Dale Johannesenace16102009-02-03 19:33:06 +00001666 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
Chris Lattnerbd564bf2006-08-08 02:23:42 +00001667 &MemOpChains[0], MemOpChains.size());
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00001668
Evan Cheng347d5f72006-04-28 21:29:37 +00001669 // Build a sequence of copy-to-reg nodes chained together with token chain
1670 // and flag operands which copy the outgoing args into registers.
Dan Gohman475871a2008-07-27 21:46:04 +00001671 SDValue InFlag;
Arnold Schwaighofer30e62c02008-04-30 09:16:33 +00001672  // Tail call byval lowering might overwrite argument registers, so in case of
1673 // tail call optimization the copies to registers are lowered later.
1674 if (!IsTailCall)
1675 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
Scott Michelfdc40a02009-02-17 22:15:04 +00001676 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
Dale Johannesendd64c412009-02-04 00:33:20 +00001677 RegsToPass[i].second, InFlag);
Arnold Schwaighofer30e62c02008-04-30 09:16:33 +00001678 InFlag = Chain.getValue(1);
1679 }
Gordon Henriksen86737662008-01-05 16:56:59 +00001680
Evan Chengf4684712007-02-21 21:18:14 +00001681 // ELF / PIC requires GOT in the EBX register before function calls via PLT
Scott Michelfdc40a02009-02-17 22:15:04 +00001682 // GOT pointer.
Arnold Schwaighofer258bb1b2008-02-26 22:21:54 +00001683 if (CallRequiresGOTPtrInReg(Is64Bit, IsTailCall)) {
Dale Johannesendd64c412009-02-04 00:33:20 +00001684 Chain = DAG.getCopyToReg(Chain, dl, X86::EBX,
Scott Michelfdc40a02009-02-17 22:15:04 +00001685 DAG.getNode(X86ISD::GlobalBaseReg,
1686 DebugLoc::getUnknownLoc(),
Dale Johannesenb300d2a2009-02-07 00:55:49 +00001687 getPointerTy()),
Arnold Schwaighofer258bb1b2008-02-26 22:21:54 +00001688 InFlag);
1689 InFlag = Chain.getValue(1);
1690 }
Arnold Schwaighofera2a4b472008-02-26 10:21:54 +00001691  // If we are tail calling and generating PIC/GOT style code, load the address
1692  // of the callee into ecx. The value in ecx is used as the target of the tail
1693 // jump. This is done to circumvent the ebx/callee-saved problem for tail
1694 // calls on PIC/GOT architectures. Normally we would just put the address of
1695  // GOT into ebx and then call target@PLT. But for tail calls ebx would be
1696 // restored (since ebx is callee saved) before jumping to the target@PLT.
Arnold Schwaighofer258bb1b2008-02-26 22:21:54 +00001697 if (CallRequiresFnAddressInReg(Is64Bit, IsTailCall)) {
Arnold Schwaighofera2a4b472008-02-26 10:21:54 +00001698 // Note: The actual moving to ecx is done further down.
1699 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
Evan Chengda43bcf2008-09-24 00:05:32 +00001700 if (G && !G->getGlobal()->hasHiddenVisibility() &&
Arnold Schwaighofera2a4b472008-02-26 10:21:54 +00001701 !G->getGlobal()->hasProtectedVisibility())
1702 Callee = LowerGlobalAddress(Callee, DAG);
Bill Wendling056292f2008-09-16 21:48:12 +00001703 else if (isa<ExternalSymbolSDNode>(Callee))
1704 Callee = LowerExternalSymbol(Callee,DAG);
Anton Korobeynikov7f705592007-01-12 19:20:47 +00001705 }
Gordon Henriksenae636f82008-01-03 16:47:34 +00001706
Gordon Henriksen86737662008-01-05 16:56:59 +00001707 if (Is64Bit && isVarArg) {
1708 // From AMD64 ABI document:
1709 // For calls that may call functions that use varargs or stdargs
1710 // (prototype-less calls or calls to functions containing ellipsis (...) in
1711 // the declaration) %al is used as hidden argument to specify the number
1712 // of SSE registers used. The contents of %al do not need to match exactly
1713    // the number of registers, but must be an upper bound on the number of SSE
1714 // registers used and is in the range 0 - 8 inclusive.
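    // For example (illustrative only): a call such as printf("%f\n", X)
    // passes one value in XMM0, so %al is set to 1 below; a vararg call that
    // uses no SSE registers sets %al to 0. Any value that is at least the
    // real count (and at most 8) would also satisfy the ABI.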
Anton Korobeynikov998a5bc2008-04-27 23:15:03 +00001715
1716 // FIXME: Verify this on Win64
Gordon Henriksen86737662008-01-05 16:56:59 +00001717 // Count the number of XMM registers allocated.
1718 static const unsigned XMMArgRegs[] = {
1719 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
1720 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
1721 };
1722 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
Scott Michelfdc40a02009-02-17 22:15:04 +00001723 assert((Subtarget->hasSSE1() || !NumXMMRegs)
Torok Edwin3f142c32009-02-01 18:15:56 +00001724 && "SSE registers cannot be used when SSE is disabled");
Scott Michelfdc40a02009-02-17 22:15:04 +00001725
Dale Johannesendd64c412009-02-04 00:33:20 +00001726 Chain = DAG.getCopyToReg(Chain, dl, X86::AL,
Gordon Henriksen86737662008-01-05 16:56:59 +00001727 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
1728 InFlag = Chain.getValue(1);
1729 }
1730
Arnold Schwaighofer865c6812008-02-26 09:19:59 +00001731
Arnold Schwaighoferc8ab8cd2008-01-11 16:49:42 +00001732 // For tail calls lower the arguments to the 'real' stack slot.
Gordon Henriksen86737662008-01-05 16:56:59 +00001733 if (IsTailCall) {
Dan Gohman475871a2008-07-27 21:46:04 +00001734 SmallVector<SDValue, 8> MemOpChains2;
1735 SDValue FIN;
Gordon Henriksen86737662008-01-05 16:56:59 +00001736 int FI = 0;
Arnold Schwaighofer865c6812008-02-26 09:19:59 +00001737 // Do not flag the preceding CopyToReg nodes together with the following stuff.
Dan Gohman475871a2008-07-27 21:46:04 +00001738 InFlag = SDValue();
Gordon Henriksen86737662008-01-05 16:56:59 +00001739 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1740 CCValAssign &VA = ArgLocs[i];
1741 if (!VA.isRegLoc()) {
Arnold Schwaighoferc8ab8cd2008-01-11 16:49:42 +00001742 assert(VA.isMemLoc());
Dan Gohman095cc292008-09-13 01:54:27 +00001743 SDValue Arg = TheCall->getArg(i);
1744 ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
Gordon Henriksen86737662008-01-05 16:56:59 +00001745 // Create frame index.
1746 int32_t Offset = VA.getLocMemOffset()+FPDiff;
Duncan Sands83ec4b62008-06-06 12:08:01 +00001747 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
Gordon Henriksen86737662008-01-05 16:56:59 +00001748 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
Arnold Schwaighofer4b5324a2008-04-12 18:11:06 +00001749 FIN = DAG.getFrameIndex(FI, getPointerTy());
Arnold Schwaighoferc8ab8cd2008-01-11 16:49:42 +00001750
Duncan Sands276dcbd2008-03-21 09:14:45 +00001751 if (Flags.isByVal()) {
Evan Cheng8e5712b2008-01-12 01:08:07 +00001752 // Copy relative to framepointer.
Dan Gohman475871a2008-07-27 21:46:04 +00001753 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
Gabor Greifba36cb52008-08-28 21:40:38 +00001754 if (StackPtr.getNode() == 0)
Scott Michelfdc40a02009-02-17 22:15:04 +00001755 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr,
Dale Johannesendd64c412009-02-04 00:33:20 +00001756 getPointerTy());
Dale Johannesenace16102009-02-03 19:33:06 +00001757 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
Arnold Schwaighofer4b5324a2008-04-12 18:11:06 +00001758
1759 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, Chain,
Dale Johannesendd64c412009-02-04 00:33:20 +00001760 Flags, DAG, dl));
Gordon Henriksen86737662008-01-05 16:56:59 +00001761 } else {
Evan Cheng8e5712b2008-01-12 01:08:07 +00001762 // Store relative to framepointer.
Dan Gohman69de1932008-02-06 22:27:42 +00001763 MemOpChains2.push_back(
Dale Johannesenace16102009-02-03 19:33:06 +00001764 DAG.getStore(Chain, dl, Arg, FIN,
Dan Gohmana54cf172008-07-11 22:44:52 +00001765 PseudoSourceValue::getFixedStack(FI), 0));
Scott Michelfdc40a02009-02-17 22:15:04 +00001766 }
Gordon Henriksen86737662008-01-05 16:56:59 +00001767 }
1768 }
1769
1770 if (!MemOpChains2.empty())
Dale Johannesenace16102009-02-03 19:33:06 +00001771 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
Arnold Schwaighofer719eb022008-01-11 14:34:56 +00001772 &MemOpChains2[0], MemOpChains2.size());
Gordon Henriksen86737662008-01-05 16:56:59 +00001773
Arnold Schwaighofer30e62c02008-04-30 09:16:33 +00001774 // Copy arguments to their registers.
1775 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
Scott Michelfdc40a02009-02-17 22:15:04 +00001776 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
Dale Johannesendd64c412009-02-04 00:33:20 +00001777 RegsToPass[i].second, InFlag);
Arnold Schwaighofer30e62c02008-04-30 09:16:33 +00001778 InFlag = Chain.getValue(1);
1779 }
Dan Gohman475871a2008-07-27 21:46:04 +00001780 InFlag = SDValue();
Arnold Schwaighofer4b5324a2008-04-12 18:11:06 +00001781
Gordon Henriksen86737662008-01-05 16:56:59 +00001782 // Store the return address to the appropriate stack slot.
Arnold Schwaighofer4b5324a2008-04-12 18:11:06 +00001783 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit,
Dale Johannesenace16102009-02-03 19:33:06 +00001784 FPDiff, dl);
Gordon Henriksen86737662008-01-05 16:56:59 +00001785 }
1786
Evan Cheng32fe1032006-05-25 00:59:30 +00001787 // If the callee is a GlobalAddress node (quite common, every direct call is)
1788 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
Anton Korobeynikova5986852006-11-20 10:46:14 +00001789 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
Anton Korobeynikov2b2bc682006-12-22 22:29:05 +00001790 // We should use extra load for direct calls to dllimported functions in
1791 // non-JIT mode.
Evan Cheng817a6a92008-07-16 01:34:02 +00001792 if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
1793 getTargetMachine(), true))
Dan Gohman6520e202008-10-18 02:06:02 +00001794 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy(),
1795 G->getOffset());
Bill Wendling056292f2008-09-16 21:48:12 +00001796 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1797 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
Gordon Henriksen86737662008-01-05 16:56:59 +00001798 } else if (IsTailCall) {
Arnold Schwaighoferbbd8c332009-06-12 16:26:57 +00001799 unsigned Opc = Is64Bit ? X86::R11 : X86::EAX;
Gordon Henriksen86737662008-01-05 16:56:59 +00001800
Dale Johannesendd64c412009-02-04 00:33:20 +00001801 Chain = DAG.getCopyToReg(Chain, dl,
Scott Michelfdc40a02009-02-17 22:15:04 +00001802 DAG.getRegister(Opc, getPointerTy()),
Gordon Henriksen86737662008-01-05 16:56:59 +00001803 Callee,InFlag);
1804 Callee = DAG.getRegister(Opc, getPointerTy());
1805 // Add register as live out.
1806 DAG.getMachineFunction().getRegInfo().addLiveOut(Opc);
Gordon Henriksenae636f82008-01-03 16:47:34 +00001807 }
Scott Michelfdc40a02009-02-17 22:15:04 +00001808
Chris Lattnerd96d0722007-02-25 06:40:16 +00001809 // Returns a chain & a flag for retval copy to use.
1810 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
Dan Gohman475871a2008-07-27 21:46:04 +00001811 SmallVector<SDValue, 8> Ops;
Gordon Henriksen86737662008-01-05 16:56:59 +00001812
1813 if (IsTailCall) {
Dale Johannesene8d72302009-02-06 23:05:02 +00001814 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
1815 DAG.getIntPtrConstant(0, true), InFlag);
Gordon Henriksen86737662008-01-05 16:56:59 +00001816 InFlag = Chain.getValue(1);
Scott Michelfdc40a02009-02-17 22:15:04 +00001817
Gordon Henriksen86737662008-01-05 16:56:59 +00001818 // Returns a chain & a flag for retval copy to use.
1819 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
1820 Ops.clear();
1821 }
Scott Michelfdc40a02009-02-17 22:15:04 +00001822
Nate Begeman4c5dcf52006-02-17 00:03:04 +00001823 Ops.push_back(Chain);
1824 Ops.push_back(Callee);
Evan Chengb69d1132006-06-14 18:17:40 +00001825
Gordon Henriksen86737662008-01-05 16:56:59 +00001826 if (IsTailCall)
1827 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
Evan Chengf4684712007-02-21 21:18:14 +00001828
Gordon Henriksen86737662008-01-05 16:56:59 +00001829 // Add argument registers to the end of the list so that they are known live
1830 // into the call.
Evan Cheng9b449442008-01-07 23:08:23 +00001831 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1832 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1833 RegsToPass[i].second.getValueType()));
Scott Michelfdc40a02009-02-17 22:15:04 +00001834
Evan Cheng586ccac2008-03-18 23:36:35 +00001835 // Add an implicit use GOT pointer in EBX.
1836 if (!IsTailCall && !Is64Bit &&
1837 getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1838 Subtarget->isPICStyleGOT())
1839 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));
1840
1841 // Add an implicit use of AL for x86 vararg functions.
1842 if (Is64Bit && isVarArg)
1843 Ops.push_back(DAG.getRegister(X86::AL, MVT::i8));
1844
Gabor Greifba36cb52008-08-28 21:40:38 +00001845 if (InFlag.getNode())
Evan Cheng347d5f72006-04-28 21:29:37 +00001846 Ops.push_back(InFlag);
Gordon Henriksenae636f82008-01-03 16:47:34 +00001847
Gordon Henriksen86737662008-01-05 16:56:59 +00001848 if (IsTailCall) {
Scott Michelfdc40a02009-02-17 22:15:04 +00001849 assert(InFlag.getNode() &&
Gordon Henriksen86737662008-01-05 16:56:59 +00001850 "Flag must be set. Depend on flag being set in LowerRET");
Dale Johannesenace16102009-02-03 19:33:06 +00001851 Chain = DAG.getNode(X86ISD::TAILCALL, dl,
Dan Gohman095cc292008-09-13 01:54:27 +00001852 TheCall->getVTList(), &Ops[0], Ops.size());
Scott Michelfdc40a02009-02-17 22:15:04 +00001853
Gabor Greifba36cb52008-08-28 21:40:38 +00001854 return SDValue(Chain.getNode(), Op.getResNo());
Gordon Henriksen86737662008-01-05 16:56:59 +00001855 }
1856
Dale Johannesenace16102009-02-03 19:33:06 +00001857 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
Evan Cheng347d5f72006-04-28 21:29:37 +00001858 InFlag = Chain.getValue(1);
Evan Chengd90eb7f2006-01-05 00:27:02 +00001859
Chris Lattner2d297092006-05-23 18:50:38 +00001860 // Create the CALLSEQ_END node.
Gordon Henriksen86737662008-01-05 16:56:59 +00001861 unsigned NumBytesForCalleeToPush;
Dan Gohman095cc292008-09-13 01:54:27 +00001862 if (IsCalleePop(isVarArg, CC))
Gordon Henriksen86737662008-01-05 16:56:59 +00001863 NumBytesForCalleeToPush = NumBytes; // Callee pops everything
Evan Chengb188dd92008-09-10 18:25:29 +00001864 else if (!Is64Bit && CC != CallingConv::Fast && IsStructRet)
Anton Korobeynikovb10308e2007-01-28 13:31:35 +00001865 // If this is a call to a struct-return function, the callee
1866 // pops the hidden struct pointer, so we have to push it back.
1867 // This is common for Darwin/X86, Linux & Mingw32 targets.
Gordon Henriksenae636f82008-01-03 16:47:34 +00001868 NumBytesForCalleeToPush = 4;
Gordon Henriksen86737662008-01-05 16:56:59 +00001869 else
Gordon Henriksenae636f82008-01-03 16:47:34 +00001870 NumBytesForCalleeToPush = 0; // Callee pops nothing.
Scott Michelfdc40a02009-02-17 22:15:04 +00001871
Gordon Henriksenae636f82008-01-03 16:47:34 +00001872 // Returns a flag for retval copy to use.
Bill Wendling0f8d9c02007-11-13 00:44:25 +00001873 Chain = DAG.getCALLSEQ_END(Chain,
Chris Lattnere563bbc2008-10-11 22:08:30 +00001874 DAG.getIntPtrConstant(NumBytes, true),
1875 DAG.getIntPtrConstant(NumBytesForCalleeToPush,
1876 true),
Bill Wendling0f8d9c02007-11-13 00:44:25 +00001877 InFlag);
Chris Lattner3085e152007-02-25 08:59:22 +00001878 InFlag = Chain.getValue(1);
Anton Korobeynikov12c49af2006-11-21 00:01:06 +00001879
Chris Lattner3085e152007-02-25 08:59:22 +00001880 // Handle result values, copying them out of physregs into vregs that we
1881 // return.
Dan Gohman095cc292008-09-13 01:54:27 +00001882 return SDValue(LowerCallResult(Chain, InFlag, TheCall, CC, DAG),
Gabor Greif327ef032008-08-28 23:19:51 +00001883 Op.getResNo());
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00001884}
1885
Evan Cheng25ab6902006-09-08 06:48:29 +00001886
1887//===----------------------------------------------------------------------===//
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001888// Fast Calling Convention (tail call) implementation
1889//===----------------------------------------------------------------------===//
1890
1891// Like the stdcall convention, the callee cleans up the arguments, except that ECX is
1892// reserved for storing the address of the tail-called function. Only 2 registers are
1893// free for argument passing (inreg). Tail call optimization is performed
1894// provided:
1895// * tailcallopt is enabled
1896// * caller/callee are fastcc
Arnold Schwaighofera2a4b472008-02-26 10:21:54 +00001897// On the X86_64 architecture, with GOT-style position-independent code, only local
1898// (within module) calls are supported at the moment.
Arnold Schwaighofer48abc5c2007-10-12 21:30:57 +00001899// To keep the stack aligned according to the platform ABI, the function
1900// GetAlignedArgumentStackSize ensures that the argument delta is always a multiple
1901// of the stack alignment. (Dynamic linkers need this - darwin's dyld for example.)
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001902// If the tail-called callee has more arguments than the caller, the
1903// caller needs to make sure that there is room to move the RETADDR to. This is
Arnold Schwaighofer48abc5c2007-10-12 21:30:57 +00001904// achieved by reserving an area the size of the argument delta right after the
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001905// original RETADDR, but before the saved frame pointer or the spilled registers,
1906// e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
1907// stack layout:
1908// arg1
1909// arg2
1910// RETADDR
Scott Michelfdc40a02009-02-17 22:15:04 +00001911// [ new RETADDR
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001912// move area ]
1913// (possible EBP)
1914// ESI
1915// EDI
1916// local1 ..
1917
1918/// GetAlignedArgumentStackSize - Round the stack size up to the form 16n + 12
1919/// for a 16-byte alignment requirement, i.e. so that the size plus the return address slot stays stack-aligned.
Scott Michelfdc40a02009-02-17 22:15:04 +00001920unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001921 SelectionDAG& DAG) {
Evan Chenge9ac9e62008-09-07 09:07:23 +00001922 MachineFunction &MF = DAG.getMachineFunction();
1923 const TargetMachine &TM = MF.getTarget();
1924 const TargetFrameInfo &TFI = *TM.getFrameInfo();
1925 unsigned StackAlignment = TFI.getStackAlignment();
Scott Michelfdc40a02009-02-17 22:15:04 +00001926 uint64_t AlignMask = StackAlignment - 1;
Evan Chenge9ac9e62008-09-07 09:07:23 +00001927 int64_t Offset = StackSize;
Anton Korobeynikovbff66b02008-09-09 18:22:57 +00001928 uint64_t SlotSize = TD->getPointerSize();
Evan Chenge9ac9e62008-09-07 09:07:23 +00001929 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
1930 // The misaligned part is at most StackAlignment - SlotSize; just add the difference.
1931 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
1932 } else {
1933 // Mask out the lower bits, then add StackAlignment once plus (StackAlignment - SlotSize).
Scott Michelfdc40a02009-02-17 22:15:04 +00001934 Offset = ((~AlignMask) & Offset) + StackAlignment +
Evan Chenge9ac9e62008-09-07 09:07:23 +00001935 (StackAlignment-SlotSize);
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001936 }
Evan Chenge9ac9e62008-09-07 09:07:23 +00001937 return Offset;
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001938}
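// Worked example for the rounding above (illustrative only; assumes the common
// x86-32 values StackAlignment = 16 and SlotSize = 4):
//   StackSize = 20:  20 & 15 = 4  <= 12, so Offset = 20 + (12 - 4)        = 28
//   StackSize = 30:  30 & 15 = 14 >  12, so Offset = (30 & ~15) + 16 + 12 = 44
// Both results are congruent to 12 (mod 16), so adding the 4-byte return
// address slot brings the total back to a multiple of the 16-byte alignment.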
1939
1940/// IsEligibleForTailCallOptimization - Check to see whether the next instruction
Evan Cheng9df7dc52007-11-02 01:26:22 +00001941/// following the call is a return. A function is eligible if caller/callee
1942/// calling conventions match, currently only fastcc supports tail calls, and
1943/// the function CALL is immediately followed by a RET.
Dan Gohman095cc292008-09-13 01:54:27 +00001944bool X86TargetLowering::IsEligibleForTailCallOptimization(CallSDNode *TheCall,
Dan Gohman475871a2008-07-27 21:46:04 +00001945 SDValue Ret,
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001946 SelectionDAG& DAG) const {
Evan Cheng9df7dc52007-11-02 01:26:22 +00001947 if (!PerformTailCallOpt)
1948 return false;
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001949
Dan Gohman095cc292008-09-13 01:54:27 +00001950 if (CheckTailCallReturnConstraints(TheCall, Ret)) {
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001951 MachineFunction &MF = DAG.getMachineFunction();
1952 unsigned CallerCC = MF.getFunction()->getCallingConv();
Dan Gohman095cc292008-09-13 01:54:27 +00001953 unsigned CalleeCC= TheCall->getCallingConv();
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001954 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
Dan Gohman095cc292008-09-13 01:54:27 +00001955 SDValue Callee = TheCall->getCallee();
Arnold Schwaighofera2a4b472008-02-26 10:21:54 +00001956 // On x86/32Bit PIC/GOT tail calls are supported.
Evan Cheng9df7dc52007-11-02 01:26:22 +00001957 if (getTargetMachine().getRelocationModel() != Reloc::PIC_ ||
Arnold Schwaighofera2a4b472008-02-26 10:21:54 +00001958 !Subtarget->isPICStyleGOT() || !Subtarget->is64Bit())
Evan Cheng9df7dc52007-11-02 01:26:22 +00001959 return true;
1960
Arnold Schwaighofera2a4b472008-02-26 10:21:54 +00001961 // Can only do local tail calls (in same module, hidden or protected) on
1962 // x86_64 PIC/GOT at the moment.
Gordon Henriksen86737662008-01-05 16:56:59 +00001963 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1964 return G->getGlobal()->hasHiddenVisibility()
1965 || G->getGlobal()->hasProtectedVisibility();
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001966 }
1967 }
Evan Cheng9df7dc52007-11-02 01:26:22 +00001968
1969 return false;
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00001970}
1971
Dan Gohman3df24e62008-09-03 23:12:08 +00001972FastISel *
1973X86TargetLowering::createFastISel(MachineFunction &mf,
Dan Gohmand57dd5f2008-09-23 21:53:34 +00001974 MachineModuleInfo *mmo,
Devang Patel83489bb2009-01-13 00:35:13 +00001975 DwarfWriter *dw,
Dan Gohman3df24e62008-09-03 23:12:08 +00001976 DenseMap<const Value *, unsigned> &vm,
1977 DenseMap<const BasicBlock *,
Dan Gohman0586d912008-09-10 20:11:02 +00001978 MachineBasicBlock *> &bm,
Dan Gohmandd5b58a2008-10-14 23:54:11 +00001979 DenseMap<const AllocaInst *, int> &am
1980#ifndef NDEBUG
1981 , SmallSet<Instruction*, 8> &cil
1982#endif
1983 ) {
Devang Patel83489bb2009-01-13 00:35:13 +00001984 return X86::createFastISel(mf, mmo, dw, vm, bm, am
Dan Gohmandd5b58a2008-10-14 23:54:11 +00001985#ifndef NDEBUG
1986 , cil
1987#endif
1988 );
Dan Gohmand9f3c482008-08-19 21:32:53 +00001989}
1990
1991
Chris Lattnerfcf1a3d2007-02-28 06:10:12 +00001992//===----------------------------------------------------------------------===//
1993// Other Lowering Hooks
1994//===----------------------------------------------------------------------===//
1995
1996
Dan Gohman475871a2008-07-27 21:46:04 +00001997SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
Anton Korobeynikova2780e12007-08-15 17:12:32 +00001998 MachineFunction &MF = DAG.getMachineFunction();
1999 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2000 int ReturnAddrIndex = FuncInfo->getRAIndex();
2001
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00002002 if (ReturnAddrIndex == 0) {
2003 // Set up a frame object for the return address.
Bill Wendling64e87322009-01-16 19:25:27 +00002004 uint64_t SlotSize = TD->getPointerSize();
Anton Korobeynikovbff66b02008-09-09 18:22:57 +00002005 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize);
Anton Korobeynikova2780e12007-08-15 17:12:32 +00002006 FuncInfo->setRAIndex(ReturnAddrIndex);
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00002007 }
2008
Evan Cheng25ab6902006-09-08 06:48:29 +00002009 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00002010}
2011
2012
Chris Lattner1c39d4c2008-12-24 23:53:05 +00002013/// TranslateX86CC - Do a one-to-one translation of an ISD::CondCode to the X86
2014/// specific condition code, returning the condition code and the LHS/RHS of the
2015/// comparison to make.
2016static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
2017 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
Evan Chengd9558e02006-01-06 00:43:03 +00002018 if (!isFP) {
Chris Lattnerbfd68a72006-09-13 17:04:54 +00002019 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
2020 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
2021 // X > -1 -> X == 0, jump !sign.
2022 RHS = DAG.getConstant(0, RHS.getValueType());
Chris Lattner1c39d4c2008-12-24 23:53:05 +00002023 return X86::COND_NS;
Chris Lattnerbfd68a72006-09-13 17:04:54 +00002024 } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
2025 // X < 0 -> X == 0, jump on sign.
Chris Lattner1c39d4c2008-12-24 23:53:05 +00002026 return X86::COND_S;
Dan Gohmanf5aeb1a2008-09-12 16:56:44 +00002027 } else if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
Dan Gohman5f6913c2007-09-17 14:49:27 +00002028 // X < 1 -> X <= 0
2029 RHS = DAG.getConstant(0, RHS.getValueType());
Chris Lattner1c39d4c2008-12-24 23:53:05 +00002030 return X86::COND_LE;
Chris Lattnerbfd68a72006-09-13 17:04:54 +00002031 }
Chris Lattnerf9570512006-09-13 03:22:10 +00002032 }
Anton Korobeynikov12c49af2006-11-21 00:01:06 +00002033
Evan Chengd9558e02006-01-06 00:43:03 +00002034 switch (SetCCOpcode) {
Chris Lattner4c78e022008-12-23 23:42:27 +00002035 default: assert(0 && "Invalid integer condition!");
Chris Lattner1c39d4c2008-12-24 23:53:05 +00002036 case ISD::SETEQ: return X86::COND_E;
2037 case ISD::SETGT: return X86::COND_G;
2038 case ISD::SETGE: return X86::COND_GE;
2039 case ISD::SETLT: return X86::COND_L;
2040 case ISD::SETLE: return X86::COND_LE;
2041 case ISD::SETNE: return X86::COND_NE;
2042 case ISD::SETULT: return X86::COND_B;
2043 case ISD::SETUGT: return X86::COND_A;
2044 case ISD::SETULE: return X86::COND_BE;
2045 case ISD::SETUGE: return X86::COND_AE;
Evan Chengd9558e02006-01-06 00:43:03 +00002046 }
Chris Lattner4c78e022008-12-23 23:42:27 +00002047 }
Scott Michelfdc40a02009-02-17 22:15:04 +00002048
Chris Lattner4c78e022008-12-23 23:42:27 +00002049 // First determine if it is required or is profitable to flip the operands.
Duncan Sands4047f4a2008-10-24 13:03:10 +00002050
Chris Lattner4c78e022008-12-23 23:42:27 +00002051 // If LHS is a foldable load, but RHS is not, flip the condition.
2052 if ((ISD::isNON_EXTLoad(LHS.getNode()) && LHS.hasOneUse()) &&
2053 !(ISD::isNON_EXTLoad(RHS.getNode()) && RHS.hasOneUse())) {
2054 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
2055 std::swap(LHS, RHS);
Evan Cheng4d46d0a2008-08-28 23:48:31 +00002056 }
2057
Chris Lattner4c78e022008-12-23 23:42:27 +00002058 switch (SetCCOpcode) {
2059 default: break;
2060 case ISD::SETOLT:
2061 case ISD::SETOLE:
2062 case ISD::SETUGT:
2063 case ISD::SETUGE:
2064 std::swap(LHS, RHS);
2065 break;
2066 }
2067
2068 // On a floating point condition, the flags are set as follows:
2069 // ZF PF CF op
2070 // 0 | 0 | 0 | X > Y
2071 // 0 | 0 | 1 | X < Y
2072 // 1 | 0 | 0 | X == Y
2073 // 1 | 1 | 1 | unordered
2074 switch (SetCCOpcode) {
Chris Lattner1c39d4c2008-12-24 23:53:05 +00002075 default: assert(0 && "Condcode should be pre-legalized away");
Chris Lattner4c78e022008-12-23 23:42:27 +00002076 case ISD::SETUEQ:
Chris Lattner1c39d4c2008-12-24 23:53:05 +00002077 case ISD::SETEQ: return X86::COND_E;
Chris Lattner4c78e022008-12-23 23:42:27 +00002078 case ISD::SETOLT: // flipped
2079 case ISD::SETOGT:
Chris Lattner1c39d4c2008-12-24 23:53:05 +00002080 case ISD::SETGT: return X86::COND_A;
Chris Lattner4c78e022008-12-23 23:42:27 +00002081 case ISD::SETOLE: // flipped
2082 case ISD::SETOGE:
Chris Lattner1c39d4c2008-12-24 23:53:05 +00002083 case ISD::SETGE: return X86::COND_AE;
Chris Lattner4c78e022008-12-23 23:42:27 +00002084 case ISD::SETUGT: // flipped
2085 case ISD::SETULT:
Chris Lattner1c39d4c2008-12-24 23:53:05 +00002086 case ISD::SETLT: return X86::COND_B;
Chris Lattner4c78e022008-12-23 23:42:27 +00002087 case ISD::SETUGE: // flipped
2088 case ISD::SETULE:
Chris Lattner1c39d4c2008-12-24 23:53:05 +00002089 case ISD::SETLE: return X86::COND_BE;
Chris Lattner4c78e022008-12-23 23:42:27 +00002090 case ISD::SETONE:
Chris Lattner1c39d4c2008-12-24 23:53:05 +00002091 case ISD::SETNE: return X86::COND_NE;
2092 case ISD::SETUO: return X86::COND_P;
2093 case ISD::SETO: return X86::COND_NP;
Chris Lattner4c78e022008-12-23 23:42:27 +00002094 }
Evan Chengd9558e02006-01-06 00:43:03 +00002095}
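// Example of the FP path above (illustrative): for (setolt %x, %y) the operands
// are swapped, so the comparison that gets emitted is between %y and %x, and
// X86::COND_A is returned. Per the flag table above, "above" (ZF == 0 && CF == 0)
// then means %y > %x, i.e. the original ordered %x < %y, and the unordered case
// (ZF == PF == CF == 1) correctly fails the condition.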
2096
Evan Cheng4a460802006-01-11 00:33:36 +00002097/// hasFPCMov - is there a floating point cmov for the specific X86 condition
2098/// code. Current x86 isa includes the following FP cmov instructions:
Evan Chengaaca22c2006-01-10 20:26:56 +00002099/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
Evan Cheng4a460802006-01-11 00:33:36 +00002100static bool hasFPCMov(unsigned X86CC) {
Evan Chengaaca22c2006-01-10 20:26:56 +00002101 switch (X86CC) {
2102 default:
2103 return false;
Chris Lattner7fbe9722006-10-20 17:42:20 +00002104 case X86::COND_B:
2105 case X86::COND_BE:
2106 case X86::COND_E:
2107 case X86::COND_P:
2108 case X86::COND_A:
2109 case X86::COND_AE:
2110 case X86::COND_NE:
2111 case X86::COND_NP:
Evan Chengaaca22c2006-01-10 20:26:56 +00002112 return true;
2113 }
2114}
2115
Nate Begeman9008ca62009-04-27 18:41:29 +00002116/// isUndefOrInRange - Return true if Val is undef or if its value falls within
2117/// the specified range [Low, Hi).
2118static bool isUndefOrInRange(int Val, int Low, int Hi) {
2119 return (Val < 0) || (Val >= Low && Val < Hi);
2120}
2121
2122/// isUndefOrEqual - Val is either less than zero (undef) or equal to the
2123/// specified value.
2124static bool isUndefOrEqual(int Val, int CmpVal) {
2125 if (Val < 0 || Val == CmpVal)
Evan Cheng5ced1d82006-04-06 23:23:56 +00002126 return true;
Nate Begeman9008ca62009-04-27 18:41:29 +00002127 return false;
Evan Chengc5cdff22006-04-07 21:53:05 +00002128}
2129
Nate Begeman9008ca62009-04-27 18:41:29 +00002130/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
2131/// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference
2132/// the second operand.
Nate Begeman5a5ca152009-04-29 05:20:52 +00002133static bool isPSHUFDMask(const SmallVectorImpl<int> &Mask, MVT VT) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002134 if (VT == MVT::v4f32 || VT == MVT::v4i32 || VT == MVT::v4i16)
2135 return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4);
2136 if (VT == MVT::v2f64 || VT == MVT::v2i64)
2137 return (Mask[0] < 2 && Mask[1] < 2);
2138 return false;
Evan Cheng5ced1d82006-04-06 23:23:56 +00002139}
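// Example (illustrative): for v4i32, the mask <2, 3, 0, 1> is a valid PSHUFD
// mask because every index selects from the first operand, while <0, 4, 1, 5>
// is rejected since indices 4 and 5 reference the second operand.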
2140
Nate Begeman9008ca62009-04-27 18:41:29 +00002141bool X86::isPSHUFDMask(ShuffleVectorSDNode *N) {
2142 SmallVector<int, 8> M;
2143 N->getMask(M);
2144 return ::isPSHUFDMask(M, N->getValueType(0));
2145}
Evan Cheng0188ecb2006-03-22 18:59:22 +00002146
Nate Begeman9008ca62009-04-27 18:41:29 +00002147/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
2148/// is suitable for input to PSHUFHW.
Nate Begeman5a5ca152009-04-29 05:20:52 +00002149static bool isPSHUFHWMask(const SmallVectorImpl<int> &Mask, MVT VT) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002150 if (VT != MVT::v8i16)
Evan Cheng0188ecb2006-03-22 18:59:22 +00002151 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002152
2153 // Lower quadword copied in order or undef.
2154 for (int i = 0; i != 4; ++i)
2155 if (Mask[i] >= 0 && Mask[i] != i)
Evan Cheng506d3df2006-03-29 23:07:14 +00002156 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002157
Evan Cheng506d3df2006-03-29 23:07:14 +00002158 // Upper quadword shuffled.
Nate Begeman9008ca62009-04-27 18:41:29 +00002159 for (int i = 4; i != 8; ++i)
2160 if (Mask[i] >= 0 && (Mask[i] < 4 || Mask[i] > 7))
Evan Cheng506d3df2006-03-29 23:07:14 +00002161 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002162
Evan Cheng506d3df2006-03-29 23:07:14 +00002163 return true;
2164}
2165
Nate Begeman9008ca62009-04-27 18:41:29 +00002166bool X86::isPSHUFHWMask(ShuffleVectorSDNode *N) {
2167 SmallVector<int, 8> M;
2168 N->getMask(M);
2169 return ::isPSHUFHWMask(M, N->getValueType(0));
2170}
Evan Cheng506d3df2006-03-29 23:07:14 +00002171
Nate Begeman9008ca62009-04-27 18:41:29 +00002172/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
2173/// is suitable for input to PSHUFLW.
Nate Begeman5a5ca152009-04-29 05:20:52 +00002174static bool isPSHUFLWMask(const SmallVectorImpl<int> &Mask, MVT VT) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002175 if (VT != MVT::v8i16)
Evan Cheng506d3df2006-03-29 23:07:14 +00002176 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002177
Rafael Espindola15684b22009-04-24 12:40:33 +00002178 // Upper quadword copied in order.
Nate Begeman9008ca62009-04-27 18:41:29 +00002179 for (int i = 4; i != 8; ++i)
2180 if (Mask[i] >= 0 && Mask[i] != i)
Rafael Espindola15684b22009-04-24 12:40:33 +00002181 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002182
Rafael Espindola15684b22009-04-24 12:40:33 +00002183 // Lower quadword shuffled.
Nate Begeman9008ca62009-04-27 18:41:29 +00002184 for (int i = 0; i != 4; ++i)
2185 if (Mask[i] >= 4)
Rafael Espindola15684b22009-04-24 12:40:33 +00002186 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002187
Rafael Espindola15684b22009-04-24 12:40:33 +00002188 return true;
Nate Begemanb706d292009-04-24 03:42:54 +00002189}
2190
Nate Begeman9008ca62009-04-27 18:41:29 +00002191bool X86::isPSHUFLWMask(ShuffleVectorSDNode *N) {
2192 SmallVector<int, 8> M;
2193 N->getMask(M);
2194 return ::isPSHUFLWMask(M, N->getValueType(0));
2195}
2196
Evan Cheng14aed5e2006-03-24 01:18:28 +00002197/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
2198/// specifies a shuffle of elements that is suitable for input to SHUFP*.
Nate Begeman5a5ca152009-04-29 05:20:52 +00002199static bool isSHUFPMask(const SmallVectorImpl<int> &Mask, MVT VT) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002200 int NumElems = VT.getVectorNumElements();
2201 if (NumElems != 2 && NumElems != 4)
2202 return false;
2203
2204 int Half = NumElems / 2;
2205 for (int i = 0; i < Half; ++i)
2206 if (!isUndefOrInRange(Mask[i], 0, NumElems))
Evan Cheng39623da2006-04-20 08:58:49 +00002207 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002208 for (int i = Half; i < NumElems; ++i)
2209 if (!isUndefOrInRange(Mask[i], NumElems, NumElems*2))
Evan Cheng39623da2006-04-20 08:58:49 +00002210 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002211
Evan Cheng14aed5e2006-03-24 01:18:28 +00002212 return true;
2213}
2214
Nate Begeman9008ca62009-04-27 18:41:29 +00002215bool X86::isSHUFPMask(ShuffleVectorSDNode *N) {
2216 SmallVector<int, 8> M;
2217 N->getMask(M);
2218 return ::isSHUFPMask(M, N->getValueType(0));
Evan Cheng39623da2006-04-20 08:58:49 +00002219}
2220
Evan Cheng213d2cf2007-05-17 18:45:50 +00002221/// isCommutedSHUFP - Returns true if the shuffle mask is exactly
Evan Cheng39623da2006-04-20 08:58:49 +00002222/// the reverse of what x86 shuffles want. x86 shuffles require the lower
2223/// half elements to come from vector 1 (which would equal the dest.) and
2224/// the upper half to come from vector 2.
Nate Begeman5a5ca152009-04-29 05:20:52 +00002225static bool isCommutedSHUFPMask(const SmallVectorImpl<int> &Mask, MVT VT) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002226 int NumElems = VT.getVectorNumElements();
2227
2228 if (NumElems != 2 && NumElems != 4)
2229 return false;
2230
2231 int Half = NumElems / 2;
2232 for (int i = 0; i < Half; ++i)
2233 if (!isUndefOrInRange(Mask[i], NumElems, NumElems*2))
Evan Cheng39623da2006-04-20 08:58:49 +00002234 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002235 for (int i = Half; i < NumElems; ++i)
2236 if (!isUndefOrInRange(Mask[i], 0, NumElems))
Evan Cheng39623da2006-04-20 08:58:49 +00002237 return false;
2238 return true;
2239}
2240
Nate Begeman9008ca62009-04-27 18:41:29 +00002241static bool isCommutedSHUFP(ShuffleVectorSDNode *N) {
2242 SmallVector<int, 8> M;
2243 N->getMask(M);
2244 return isCommutedSHUFPMask(M, N->getValueType(0));
Evan Cheng39623da2006-04-20 08:58:49 +00002245}
2246
Evan Cheng2c0dbd02006-03-24 02:58:06 +00002247/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
2248/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
Nate Begeman9008ca62009-04-27 18:41:29 +00002249bool X86::isMOVHLPSMask(ShuffleVectorSDNode *N) {
2250 if (N->getValueType(0).getVectorNumElements() != 4)
Evan Cheng2c0dbd02006-03-24 02:58:06 +00002251 return false;
2252
Evan Cheng2064a2b2006-03-28 06:50:32 +00002253 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
Nate Begeman9008ca62009-04-27 18:41:29 +00002254 return isUndefOrEqual(N->getMaskElt(0), 6) &&
2255 isUndefOrEqual(N->getMaskElt(1), 7) &&
2256 isUndefOrEqual(N->getMaskElt(2), 2) &&
2257 isUndefOrEqual(N->getMaskElt(3), 3);
Evan Cheng6e56e2c2006-11-07 22:14:24 +00002258}
2259
Evan Cheng5ced1d82006-04-06 23:23:56 +00002260/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
2261/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
Nate Begeman9008ca62009-04-27 18:41:29 +00002262bool X86::isMOVLPMask(ShuffleVectorSDNode *N) {
2263 unsigned NumElems = N->getValueType(0).getVectorNumElements();
Evan Cheng5ced1d82006-04-06 23:23:56 +00002264
Evan Cheng5ced1d82006-04-06 23:23:56 +00002265 if (NumElems != 2 && NumElems != 4)
2266 return false;
2267
Evan Chengc5cdff22006-04-07 21:53:05 +00002268 for (unsigned i = 0; i < NumElems/2; ++i)
Nate Begeman9008ca62009-04-27 18:41:29 +00002269 if (!isUndefOrEqual(N->getMaskElt(i), i + NumElems))
Evan Chengc5cdff22006-04-07 21:53:05 +00002270 return false;
Evan Cheng5ced1d82006-04-06 23:23:56 +00002271
Evan Chengc5cdff22006-04-07 21:53:05 +00002272 for (unsigned i = NumElems/2; i < NumElems; ++i)
Nate Begeman9008ca62009-04-27 18:41:29 +00002273 if (!isUndefOrEqual(N->getMaskElt(i), i))
Evan Chengc5cdff22006-04-07 21:53:05 +00002274 return false;
Evan Cheng5ced1d82006-04-06 23:23:56 +00002275
2276 return true;
2277}
2278
2279/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
Evan Cheng533a0aa2006-04-19 20:35:22 +00002280/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
2281/// and MOVLHPS.
Nate Begeman9008ca62009-04-27 18:41:29 +00002282bool X86::isMOVHPMask(ShuffleVectorSDNode *N) {
2283 unsigned NumElems = N->getValueType(0).getVectorNumElements();
Evan Cheng5ced1d82006-04-06 23:23:56 +00002284
Evan Cheng5ced1d82006-04-06 23:23:56 +00002285 if (NumElems != 2 && NumElems != 4)
2286 return false;
2287
Evan Chengc5cdff22006-04-07 21:53:05 +00002288 for (unsigned i = 0; i < NumElems/2; ++i)
Nate Begeman9008ca62009-04-27 18:41:29 +00002289 if (!isUndefOrEqual(N->getMaskElt(i), i))
Evan Chengc5cdff22006-04-07 21:53:05 +00002290 return false;
Evan Cheng5ced1d82006-04-06 23:23:56 +00002291
Nate Begeman9008ca62009-04-27 18:41:29 +00002292 for (unsigned i = 0; i < NumElems/2; ++i)
2293 if (!isUndefOrEqual(N->getMaskElt(i + NumElems/2), i + NumElems))
Evan Chengc5cdff22006-04-07 21:53:05 +00002294 return false;
Evan Cheng5ced1d82006-04-06 23:23:56 +00002295
2296 return true;
2297}
2298
Nate Begeman9008ca62009-04-27 18:41:29 +00002299/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
2300/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
2301/// <2, 3, 2, 3>
2302bool X86::isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N) {
2303 unsigned NumElems = N->getValueType(0).getVectorNumElements();
2304
2305 if (NumElems != 4)
2306 return false;
2307
2308 return isUndefOrEqual(N->getMaskElt(0), 2) &&
2309 isUndefOrEqual(N->getMaskElt(1), 3) &&
2310 isUndefOrEqual(N->getMaskElt(2), 2) &&
2311 isUndefOrEqual(N->getMaskElt(3), 3);
2312}
2313
Evan Cheng0038e592006-03-28 00:39:58 +00002314/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
2315/// specifies a shuffle of elements that is suitable for input to UNPCKL.
Nate Begeman5a5ca152009-04-29 05:20:52 +00002316static bool isUNPCKLMask(const SmallVectorImpl<int> &Mask, MVT VT,
Rafael Espindola15684b22009-04-24 12:40:33 +00002317 bool V2IsSplat = false) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002318 int NumElts = VT.getVectorNumElements();
Chris Lattner5a88b832007-02-25 07:10:00 +00002319 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
Evan Cheng0038e592006-03-28 00:39:58 +00002320 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002321
2322 for (int i = 0, j = 0; i != NumElts; i += 2, ++j) {
2323 int BitI = Mask[i];
2324 int BitI1 = Mask[i+1];
Evan Chengc5cdff22006-04-07 21:53:05 +00002325 if (!isUndefOrEqual(BitI, j))
2326 return false;
Evan Cheng39623da2006-04-20 08:58:49 +00002327 if (V2IsSplat) {
Mon P Wang7bcaefa2009-02-04 01:16:59 +00002328 if (!isUndefOrEqual(BitI1, NumElts))
Evan Cheng39623da2006-04-20 08:58:49 +00002329 return false;
2330 } else {
Chris Lattner5a88b832007-02-25 07:10:00 +00002331 if (!isUndefOrEqual(BitI1, j + NumElts))
Evan Cheng39623da2006-04-20 08:58:49 +00002332 return false;
2333 }
Evan Cheng0038e592006-03-28 00:39:58 +00002334 }
Evan Cheng0038e592006-03-28 00:39:58 +00002335 return true;
2336}
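// Example (illustrative): for v4i32 the canonical UNPCKL pattern interleaves
// the low halves of the two inputs, i.e. the mask <0, 4, 1, 5> (or, with
// V2IsSplat, <0, 4, 1, 4>).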
2337
Nate Begeman9008ca62009-04-27 18:41:29 +00002338bool X86::isUNPCKLMask(ShuffleVectorSDNode *N, bool V2IsSplat) {
2339 SmallVector<int, 8> M;
2340 N->getMask(M);
2341 return ::isUNPCKLMask(M, N->getValueType(0), V2IsSplat);
Evan Cheng39623da2006-04-20 08:58:49 +00002342}
2343
Evan Cheng4fcb9222006-03-28 02:43:26 +00002344/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
2345/// specifies a shuffle of elements that is suitable for input to UNPCKH.
Nate Begeman5a5ca152009-04-29 05:20:52 +00002346static bool isUNPCKHMask(const SmallVectorImpl<int> &Mask, MVT VT,
Rafael Espindola15684b22009-04-24 12:40:33 +00002347 bool V2IsSplat = false) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002348 int NumElts = VT.getVectorNumElements();
Chris Lattner5a88b832007-02-25 07:10:00 +00002349 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
Evan Cheng4fcb9222006-03-28 02:43:26 +00002350 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002351
2352 for (int i = 0, j = 0; i != NumElts; i += 2, ++j) {
2353 int BitI = Mask[i];
2354 int BitI1 = Mask[i+1];
Chris Lattner5a88b832007-02-25 07:10:00 +00002355 if (!isUndefOrEqual(BitI, j + NumElts/2))
Evan Chengc5cdff22006-04-07 21:53:05 +00002356 return false;
Evan Cheng39623da2006-04-20 08:58:49 +00002357 if (V2IsSplat) {
Chris Lattner5a88b832007-02-25 07:10:00 +00002358 if (isUndefOrEqual(BitI1, NumElts))
Evan Cheng39623da2006-04-20 08:58:49 +00002359 return false;
2360 } else {
Chris Lattner5a88b832007-02-25 07:10:00 +00002361 if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts))
Evan Cheng39623da2006-04-20 08:58:49 +00002362 return false;
2363 }
Evan Cheng4fcb9222006-03-28 02:43:26 +00002364 }
Evan Cheng4fcb9222006-03-28 02:43:26 +00002365 return true;
2366}
2367
Nate Begeman9008ca62009-04-27 18:41:29 +00002368bool X86::isUNPCKHMask(ShuffleVectorSDNode *N, bool V2IsSplat) {
2369 SmallVector<int, 8> M;
2370 N->getMask(M);
2371 return ::isUNPCKHMask(M, N->getValueType(0), V2IsSplat);
Evan Cheng39623da2006-04-20 08:58:49 +00002372}
2373
Evan Cheng1d5a8cc2006-04-05 07:20:06 +00002374/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
2375/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
2376/// <0, 0, 1, 1>
Nate Begeman5a5ca152009-04-29 05:20:52 +00002377static bool isUNPCKL_v_undef_Mask(const SmallVectorImpl<int> &Mask, MVT VT) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002378 int NumElems = VT.getVectorNumElements();
Bill Wendling2f9bb1a2007-04-24 21:16:55 +00002379 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
Evan Cheng1d5a8cc2006-04-05 07:20:06 +00002380 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002381
2382 for (int i = 0, j = 0; i != NumElems; i += 2, ++j) {
2383 int BitI = Mask[i];
2384 int BitI1 = Mask[i+1];
Evan Chengc5cdff22006-04-07 21:53:05 +00002385 if (!isUndefOrEqual(BitI, j))
2386 return false;
2387 if (!isUndefOrEqual(BitI1, j))
2388 return false;
Evan Cheng1d5a8cc2006-04-05 07:20:06 +00002389 }
Rafael Espindola15684b22009-04-24 12:40:33 +00002390 return true;
Nate Begemanb706d292009-04-24 03:42:54 +00002391}
2392
Nate Begeman9008ca62009-04-27 18:41:29 +00002393bool X86::isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N) {
2394 SmallVector<int, 8> M;
2395 N->getMask(M);
2396 return ::isUNPCKL_v_undef_Mask(M, N->getValueType(0));
2397}
2398
Bill Wendling2f9bb1a2007-04-24 21:16:55 +00002399/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
2400/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
2401/// <2, 2, 3, 3>
Nate Begeman5a5ca152009-04-29 05:20:52 +00002402static bool isUNPCKH_v_undef_Mask(const SmallVectorImpl<int> &Mask, MVT VT) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002403 int NumElems = VT.getVectorNumElements();
Bill Wendling2f9bb1a2007-04-24 21:16:55 +00002404 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
2405 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002406
2407 for (int i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) {
2408 int BitI = Mask[i];
2409 int BitI1 = Mask[i+1];
Bill Wendling2f9bb1a2007-04-24 21:16:55 +00002410 if (!isUndefOrEqual(BitI, j))
2411 return false;
2412 if (!isUndefOrEqual(BitI1, j))
2413 return false;
2414 }
Rafael Espindola15684b22009-04-24 12:40:33 +00002415 return true;
Nate Begemanb706d292009-04-24 03:42:54 +00002416}
2417
Nate Begeman9008ca62009-04-27 18:41:29 +00002418bool X86::isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N) {
2419 SmallVector<int, 8> M;
2420 N->getMask(M);
2421 return ::isUNPCKH_v_undef_Mask(M, N->getValueType(0));
2422}
2423
Evan Cheng017dcc62006-04-21 01:05:10 +00002424/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
2425/// specifies a shuffle of elements that is suitable for input to MOVSS,
2426/// MOVSD, and MOVD, i.e. setting the lowest element.
Nate Begeman5a5ca152009-04-29 05:20:52 +00002427static bool isMOVLMask(const SmallVectorImpl<int> &Mask, MVT VT) {
Eli Friedman10415532009-06-06 06:05:10 +00002428 if (VT.getVectorElementType().getSizeInBits() < 32)
Evan Chengd6d1cbd2006-04-11 00:19:04 +00002429 return false;
Eli Friedman10415532009-06-06 06:05:10 +00002430
2431 int NumElts = VT.getVectorNumElements();
Nate Begeman9008ca62009-04-27 18:41:29 +00002432
2433 if (!isUndefOrEqual(Mask[0], NumElts))
Evan Chengd6d1cbd2006-04-11 00:19:04 +00002434 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002435
2436 for (int i = 1; i < NumElts; ++i)
2437 if (!isUndefOrEqual(Mask[i], i))
Evan Chengd6d1cbd2006-04-11 00:19:04 +00002438 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002439
Evan Chengd6d1cbd2006-04-11 00:19:04 +00002440 return true;
2441}
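// Example (illustrative): for v4f32 the MOVSS pattern is the mask <4, 1, 2, 3>:
// the lowest element comes from V2 and the remaining elements come from V1 in
// their original positions.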
Evan Cheng1d5a8cc2006-04-05 07:20:06 +00002442
Nate Begeman9008ca62009-04-27 18:41:29 +00002443bool X86::isMOVLMask(ShuffleVectorSDNode *N) {
2444 SmallVector<int, 8> M;
2445 N->getMask(M);
2446 return ::isMOVLMask(M, N->getValueType(0));
Evan Cheng39623da2006-04-20 08:58:49 +00002447}
2448
Evan Cheng017dcc62006-04-21 01:05:10 +00002449/// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse of
2450/// what x86 movss wants. X86 movss requires the lowest element to be the lowest
Evan Cheng39623da2006-04-20 08:58:49 +00002451/// element of vector 2 and the other elements to come from vector 1 in order.
Nate Begeman5a5ca152009-04-29 05:20:52 +00002452static bool isCommutedMOVLMask(const SmallVectorImpl<int> &Mask, MVT VT,
Nate Begeman9008ca62009-04-27 18:41:29 +00002453 bool V2IsSplat = false, bool V2IsUndef = false) {
2454 int NumOps = VT.getVectorNumElements();
Chris Lattner5a88b832007-02-25 07:10:00 +00002455 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
Evan Cheng39623da2006-04-20 08:58:49 +00002456 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002457
2458 if (!isUndefOrEqual(Mask[0], 0))
Evan Cheng39623da2006-04-20 08:58:49 +00002459 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002460
2461 for (int i = 1; i < NumOps; ++i)
2462 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
2463 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
2464 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
Evan Cheng8cf723d2006-09-08 01:50:06 +00002465 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002466
Evan Cheng39623da2006-04-20 08:58:49 +00002467 return true;
2468}
2469
Nate Begeman9008ca62009-04-27 18:41:29 +00002470static bool isCommutedMOVL(ShuffleVectorSDNode *N, bool V2IsSplat = false,
Evan Cheng8cf723d2006-09-08 01:50:06 +00002471 bool V2IsUndef = false) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002472 SmallVector<int, 8> M;
2473 N->getMask(M);
2474 return isCommutedMOVLMask(M, N->getValueType(0), V2IsSplat, V2IsUndef);
Evan Cheng39623da2006-04-20 08:58:49 +00002475}
2476
Evan Chengd9539472006-04-14 21:59:03 +00002477/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2478/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
Nate Begeman9008ca62009-04-27 18:41:29 +00002479bool X86::isMOVSHDUPMask(ShuffleVectorSDNode *N) {
2480 if (N->getValueType(0).getVectorNumElements() != 4)
Evan Chengd9539472006-04-14 21:59:03 +00002481 return false;
2482
2483 // Expect 1, 1, 3, 3
Rafael Espindola15684b22009-04-24 12:40:33 +00002484 for (unsigned i = 0; i < 2; ++i) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002485 int Elt = N->getMaskElt(i);
2486 if (Elt >= 0 && Elt != 1)
2487 return false;
Rafael Espindola15684b22009-04-24 12:40:33 +00002488 }
Evan Cheng57ebe9f2006-04-15 05:37:34 +00002489
2490 bool HasHi = false;
Evan Chengd9539472006-04-14 21:59:03 +00002491 for (unsigned i = 2; i < 4; ++i) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002492 int Elt = N->getMaskElt(i);
2493 if (Elt >= 0 && Elt != 3)
2494 return false;
2495 if (Elt == 3)
2496 HasHi = true;
Evan Chengd9539472006-04-14 21:59:03 +00002497 }
Evan Cheng57ebe9f2006-04-15 05:37:34 +00002498 // Don't use movshdup if it can be done with a shufps.
Nate Begeman9008ca62009-04-27 18:41:29 +00002499 // FIXME: verify that matching u, u, 3, 3 is what we want.
Evan Cheng57ebe9f2006-04-15 05:37:34 +00002500 return HasHi;
Evan Chengd9539472006-04-14 21:59:03 +00002501}
2502
2503/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2504/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
Nate Begeman9008ca62009-04-27 18:41:29 +00002505bool X86::isMOVSLDUPMask(ShuffleVectorSDNode *N) {
2506 if (N->getValueType(0).getVectorNumElements() != 4)
Evan Chengd9539472006-04-14 21:59:03 +00002507 return false;
2508
2509 // Expect 0, 0, 2, 2
Nate Begeman9008ca62009-04-27 18:41:29 +00002510 for (unsigned i = 0; i < 2; ++i)
2511 if (N->getMaskElt(i) > 0)
2512 return false;
Evan Cheng57ebe9f2006-04-15 05:37:34 +00002513
2514 bool HasHi = false;
Evan Chengd9539472006-04-14 21:59:03 +00002515 for (unsigned i = 2; i < 4; ++i) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002516 int Elt = N->getMaskElt(i);
2517 if (Elt >= 0 && Elt != 2)
2518 return false;
2519 if (Elt == 2)
2520 HasHi = true;
Evan Chengd9539472006-04-14 21:59:03 +00002521 }
Nate Begeman9008ca62009-04-27 18:41:29 +00002522 // Don't use movsldup if it can be done with a shufps.
Evan Cheng57ebe9f2006-04-15 05:37:34 +00002523 return HasHi;
Evan Chengd9539472006-04-14 21:59:03 +00002524}
2525
Evan Cheng0b457f02008-09-25 20:50:48 +00002526/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2527/// specifies a shuffle of elements that is suitable for input to MOVDDUP.
Nate Begeman9008ca62009-04-27 18:41:29 +00002528bool X86::isMOVDDUPMask(ShuffleVectorSDNode *N) {
2529 int e = N->getValueType(0).getVectorNumElements() / 2;
2530
2531 for (int i = 0; i < e; ++i)
2532 if (!isUndefOrEqual(N->getMaskElt(i), i))
Evan Cheng0b457f02008-09-25 20:50:48 +00002533 return false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002534 for (int i = 0; i < e; ++i)
2535 if (!isUndefOrEqual(N->getMaskElt(e+i), i))
Evan Cheng0b457f02008-09-25 20:50:48 +00002536 return false;
2537 return true;
2538}
2539
Evan Cheng63d33002006-03-22 08:01:21 +00002540/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
2541/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
2542/// instructions.
2543unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002544 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2545 int NumOperands = SVOp->getValueType(0).getVectorNumElements();
2546
Evan Chengb9df0ca2006-03-22 02:53:00 +00002547 unsigned Shift = (NumOperands == 4) ? 2 : 1;
2548 unsigned Mask = 0;
Nate Begeman9008ca62009-04-27 18:41:29 +00002549 for (int i = 0; i < NumOperands; ++i) {
2550 int Val = SVOp->getMaskElt(NumOperands-i-1);
2551 if (Val < 0) Val = 0;
Evan Cheng14aed5e2006-03-24 01:18:28 +00002552 if (Val >= NumOperands) Val -= NumOperands;
Evan Cheng63d33002006-03-22 08:01:21 +00002553 Mask |= Val;
Evan Cheng36b27f32006-03-28 23:41:33 +00002554 if (i != NumOperands - 1)
2555 Mask <<= Shift;
2556 }
Evan Cheng63d33002006-03-22 08:01:21 +00002557 return Mask;
2558}
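// Worked example (illustrative): for a v4i32 mask <3, 1, 2, 0> the loop above
// visits the elements from last to first, producing
//   Imm = (Mask[3] << 6) | (Mask[2] << 4) | (Mask[1] << 2) | Mask[0]
//       = (0 << 6) | (2 << 4) | (1 << 2) | 3 = 0x27,
// the 2-bit-per-element immediate layout used by PSHUFD and SHUFPS (bits [1:0]
// pick the source of destination element 0, bits [3:2] element 1, and so on).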
2559
Evan Cheng506d3df2006-03-29 23:07:14 +00002560/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
2561/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
2562/// instructions.
2563unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002564 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
Evan Cheng506d3df2006-03-29 23:07:14 +00002565 unsigned Mask = 0;
2566 // 8 nodes, but we only care about the last 4.
2567 for (unsigned i = 7; i >= 4; --i) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002568 int Val = SVOp->getMaskElt(i);
2569 if (Val >= 0)
Mon P Wang7bcaefa2009-02-04 01:16:59 +00002570 Mask |= (Val - 4);
Evan Cheng506d3df2006-03-29 23:07:14 +00002571 if (i != 4)
2572 Mask <<= 2;
2573 }
Evan Cheng506d3df2006-03-29 23:07:14 +00002574 return Mask;
2575}
2576
2577/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
2578/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
2579/// instructions.
2580unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002581 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
Evan Cheng506d3df2006-03-29 23:07:14 +00002582 unsigned Mask = 0;
2583 // 8 nodes, but we only care about the first 4.
2584 for (int i = 3; i >= 0; --i) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002585 int Val = SVOp->getMaskElt(i);
2586 if (Val >= 0)
2587 Mask |= Val;
Evan Cheng506d3df2006-03-29 23:07:14 +00002588 if (i != 0)
2589 Mask <<= 2;
2590 }
Evan Cheng506d3df2006-03-29 23:07:14 +00002591 return Mask;
2592}
2593
Nate Begeman9008ca62009-04-27 18:41:29 +00002594/// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in
2595/// their permute mask.
2596static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp,
2597 SelectionDAG &DAG) {
2598 MVT VT = SVOp->getValueType(0);
Nate Begeman5a5ca152009-04-29 05:20:52 +00002599 unsigned NumElems = VT.getVectorNumElements();
Nate Begeman9008ca62009-04-27 18:41:29 +00002600 SmallVector<int, 8> MaskVec;
2601
Nate Begeman5a5ca152009-04-29 05:20:52 +00002602 for (unsigned i = 0; i != NumElems; ++i) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002603 int idx = SVOp->getMaskElt(i);
2604 if (idx < 0)
2605 MaskVec.push_back(idx);
Nate Begeman5a5ca152009-04-29 05:20:52 +00002606 else if (idx < (int)NumElems)
Nate Begeman9008ca62009-04-27 18:41:29 +00002607 MaskVec.push_back(idx + NumElems);
Evan Cheng5ced1d82006-04-06 23:23:56 +00002608 else
Nate Begeman9008ca62009-04-27 18:41:29 +00002609 MaskVec.push_back(idx - NumElems);
Evan Cheng5ced1d82006-04-06 23:23:56 +00002610 }
Nate Begeman9008ca62009-04-27 18:41:29 +00002611 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1),
2612 SVOp->getOperand(0), &MaskVec[0]);
Evan Cheng5ced1d82006-04-06 23:23:56 +00002613}
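// Example (illustrative): commuting the v4f32 shuffle
//   vector_shuffle V1, V2, <0, 5, 2, 7>
// yields
//   vector_shuffle V2, V1, <4, 1, 6, 3>
// i.e. indices into the first operand are rebased onto the second and vice
// versa, so the result selects the same values.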
2614
Evan Cheng779ccea2007-12-07 21:30:01 +00002615/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
2616/// the two vector operands have swapped position.
Nate Begeman9008ca62009-04-27 18:41:29 +00002617static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask, MVT VT) {
Nate Begeman5a5ca152009-04-29 05:20:52 +00002618 unsigned NumElems = VT.getVectorNumElements();
2619 for (unsigned i = 0; i != NumElems; ++i) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002620 int idx = Mask[i];
2621 if (idx < 0)
Evan Cheng8a86c3f2007-12-07 08:07:39 +00002622 continue;
Nate Begeman5a5ca152009-04-29 05:20:52 +00002623 else if (idx < (int)NumElems)
Nate Begeman9008ca62009-04-27 18:41:29 +00002624 Mask[i] = idx + NumElems;
Evan Cheng8a86c3f2007-12-07 08:07:39 +00002625 else
Nate Begeman9008ca62009-04-27 18:41:29 +00002626 Mask[i] = idx - NumElems;
Evan Cheng8a86c3f2007-12-07 08:07:39 +00002627 }
Evan Cheng8a86c3f2007-12-07 08:07:39 +00002628}
2629
Evan Cheng533a0aa2006-04-19 20:35:22 +00002630/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
2631/// match movhlps. The lower half elements should come from upper half of
2632/// V1 (and in order), and the upper half elements should come from the upper
Anton Korobeynikov12c49af2006-11-21 00:01:06 +00002633/// half of V2 (and in order).
Nate Begeman9008ca62009-04-27 18:41:29 +00002634static bool ShouldXformToMOVHLPS(ShuffleVectorSDNode *Op) {
2635 if (Op->getValueType(0).getVectorNumElements() != 4)
Evan Cheng533a0aa2006-04-19 20:35:22 +00002636 return false;
2637 for (unsigned i = 0, e = 2; i != e; ++i)
Nate Begeman9008ca62009-04-27 18:41:29 +00002638 if (!isUndefOrEqual(Op->getMaskElt(i), i+2))
Evan Cheng533a0aa2006-04-19 20:35:22 +00002639 return false;
2640 for (unsigned i = 2; i != 4; ++i)
Nate Begeman9008ca62009-04-27 18:41:29 +00002641 if (!isUndefOrEqual(Op->getMaskElt(i), i+4))
Evan Cheng533a0aa2006-04-19 20:35:22 +00002642 return false;
2643 return true;
2644}
2645
Evan Cheng5ced1d82006-04-06 23:23:56 +00002646/// isScalarLoadToVector - Returns true if the node is a scalar load that
Evan Cheng7e2ff772008-05-08 00:57:18 +00002647/// is promoted to a vector. It also returns the LoadSDNode by reference if
2648/// required.
2649static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) {
Evan Cheng0b457f02008-09-25 20:50:48 +00002650 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
2651 return false;
2652 N = N->getOperand(0).getNode();
2653 if (!ISD::isNON_EXTLoad(N))
2654 return false;
2655 if (LD)
2656 *LD = cast<LoadSDNode>(N);
2657 return true;
Evan Cheng5ced1d82006-04-06 23:23:56 +00002658}
2659
Evan Cheng533a0aa2006-04-19 20:35:22 +00002660/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
2661/// match movlp{s|d}. The lower half elements should come from lower half of
2662/// V1 (and in order), and the upper half elements should come from the upper
2663/// half of V2 (and in order). And since V1 will become the source of the
2664/// MOVLP, it must be either a vector load or a scalar load to vector.
Nate Begeman9008ca62009-04-27 18:41:29 +00002665static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
2666 ShuffleVectorSDNode *Op) {
Evan Cheng466685d2006-10-09 20:57:25 +00002667 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
Evan Cheng533a0aa2006-04-19 20:35:22 +00002668 return false;
Evan Cheng23425f52006-10-09 21:39:25 +00002669 // If V2 is a vector load, don't do this transformation. We will try to use
2670 // a load-folding shufps op instead.
2671 if (ISD::isNON_EXTLoad(V2))
2672 return false;
Evan Cheng5ced1d82006-04-06 23:23:56 +00002673
Nate Begeman5a5ca152009-04-29 05:20:52 +00002674 unsigned NumElems = Op->getValueType(0).getVectorNumElements();
Nate Begeman9008ca62009-04-27 18:41:29 +00002675
Evan Cheng533a0aa2006-04-19 20:35:22 +00002676 if (NumElems != 2 && NumElems != 4)
2677 return false;
Nate Begeman5a5ca152009-04-29 05:20:52 +00002678 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
Nate Begeman9008ca62009-04-27 18:41:29 +00002679 if (!isUndefOrEqual(Op->getMaskElt(i), i))
Evan Cheng533a0aa2006-04-19 20:35:22 +00002680 return false;
Nate Begeman5a5ca152009-04-29 05:20:52 +00002681 for (unsigned i = NumElems/2; i != NumElems; ++i)
Nate Begeman9008ca62009-04-27 18:41:29 +00002682 if (!isUndefOrEqual(Op->getMaskElt(i), i+NumElems))
Evan Cheng533a0aa2006-04-19 20:35:22 +00002683 return false;
2684 return true;
Evan Cheng5ced1d82006-04-06 23:23:56 +00002685}
2686
Evan Cheng39623da2006-04-20 08:58:49 +00002687/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
2688/// all the same.
2689static bool isSplatVector(SDNode *N) {
2690 if (N->getOpcode() != ISD::BUILD_VECTOR)
2691 return false;
Evan Cheng5ced1d82006-04-06 23:23:56 +00002692
Dan Gohman475871a2008-07-27 21:46:04 +00002693 SDValue SplatValue = N->getOperand(0);
Evan Cheng39623da2006-04-20 08:58:49 +00002694 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
2695 if (N->getOperand(i) != SplatValue)
Evan Cheng5ced1d82006-04-06 23:23:56 +00002696 return false;
2697 return true;
2698}
2699
Evan Cheng213d2cf2007-05-17 18:45:50 +00002700/// isZeroNode - Returns true if Elt is a constant zero or a floating point
2701/// constant +0.0.
Dan Gohman475871a2008-07-27 21:46:04 +00002702static inline bool isZeroNode(SDValue Elt) {
Evan Cheng213d2cf2007-05-17 18:45:50 +00002703 return ((isa<ConstantSDNode>(Elt) &&
Dan Gohmanf5aeb1a2008-09-12 16:56:44 +00002704 cast<ConstantSDNode>(Elt)->getZExtValue() == 0) ||
Evan Cheng213d2cf2007-05-17 18:45:50 +00002705 (isa<ConstantFPSDNode>(Elt) &&
Dale Johanneseneaf08942007-08-31 04:03:46 +00002706 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
Evan Cheng213d2cf2007-05-17 18:45:50 +00002707}
2708
2709/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
Nate Begeman9008ca62009-04-27 18:41:29 +00002710/// to a zero vector.
Nate Begeman5a5ca152009-04-29 05:20:52 +00002711/// FIXME: move to dag combiner / method on ShuffleVectorSDNode
Nate Begeman9008ca62009-04-27 18:41:29 +00002712static bool isZeroShuffle(ShuffleVectorSDNode *N) {
Dan Gohman475871a2008-07-27 21:46:04 +00002713 SDValue V1 = N->getOperand(0);
2714 SDValue V2 = N->getOperand(1);
Nate Begeman5a5ca152009-04-29 05:20:52 +00002715 unsigned NumElems = N->getValueType(0).getVectorNumElements();
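  // Each mask element must be undef, select from an operand that is undef or an
  // all-zeros build_vector, or select a provably zero element of a BUILD_VECTOR.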
2716 for (unsigned i = 0; i != NumElems; ++i) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002717 int Idx = N->getMaskElt(i);
Nate Begeman5a5ca152009-04-29 05:20:52 +00002718 if (Idx >= (int)NumElems) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002719 unsigned Opc = V2.getOpcode();
Rafael Espindola15684b22009-04-24 12:40:33 +00002720 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
2721 continue;
Nate Begeman9008ca62009-04-27 18:41:29 +00002722 if (Opc != ISD::BUILD_VECTOR || !isZeroNode(V2.getOperand(Idx-NumElems)))
2723 return false;
2724 } else if (Idx >= 0) {
2725 unsigned Opc = V1.getOpcode();
2726 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
2727 continue;
2728 if (Opc != ISD::BUILD_VECTOR || !isZeroNode(V1.getOperand(Idx)))
Chris Lattner8a594482007-11-25 00:24:49 +00002729 return false;
Evan Cheng213d2cf2007-05-17 18:45:50 +00002730 }
2731 }
2732 return true;
2733}
2734
2735/// getZeroVector - Returns a vector of specified type with all zero elements.
2736///
Dale Johannesenace16102009-02-03 19:33:06 +00002737static SDValue getZeroVector(MVT VT, bool HasSSE2, SelectionDAG &DAG,
2738 DebugLoc dl) {
Duncan Sands83ec4b62008-06-06 12:08:01 +00002739 assert(VT.isVector() && "Expected a vector type");
Scott Michelfdc40a02009-02-17 22:15:04 +00002740
Chris Lattner8a594482007-11-25 00:24:49 +00002741 // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest
2742 // type. This ensures they get CSE'd.
Dan Gohman475871a2008-07-27 21:46:04 +00002743 SDValue Vec;
Duncan Sands83ec4b62008-06-06 12:08:01 +00002744 if (VT.getSizeInBits() == 64) { // MMX
Dan Gohman475871a2008-07-27 21:46:04 +00002745 SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
Evan Chenga87008d2009-02-25 22:49:59 +00002746 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst);
Evan Chengf0df0312008-05-15 08:39:06 +00002747 } else if (HasSSE2) { // SSE2
Dan Gohman475871a2008-07-27 21:46:04 +00002748 SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
Evan Chenga87008d2009-02-25 22:49:59 +00002749 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
Evan Chengf0df0312008-05-15 08:39:06 +00002750 } else { // SSE1
Dan Gohman475871a2008-07-27 21:46:04 +00002751 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
Evan Chenga87008d2009-02-25 22:49:59 +00002752 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
Evan Chengf0df0312008-05-15 08:39:06 +00002753 }
Dale Johannesenace16102009-02-03 19:33:06 +00002754 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
Evan Cheng213d2cf2007-05-17 18:45:50 +00002755}
2756
Chris Lattner8a594482007-11-25 00:24:49 +00002757/// getOnesVector - Returns a vector of specified type with all bits set.
2758///
Dale Johannesenace16102009-02-03 19:33:06 +00002759static SDValue getOnesVector(MVT VT, SelectionDAG &DAG, DebugLoc dl) {
Duncan Sands83ec4b62008-06-06 12:08:01 +00002760 assert(VT.isVector() && "Expected a vector type");
Scott Michelfdc40a02009-02-17 22:15:04 +00002761
Chris Lattner8a594482007-11-25 00:24:49 +00002762 // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest
2763 // type. This ensures they get CSE'd.
Dan Gohman475871a2008-07-27 21:46:04 +00002764 SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
2765 SDValue Vec;
Duncan Sands83ec4b62008-06-06 12:08:01 +00002766 if (VT.getSizeInBits() == 64) // MMX
Evan Chenga87008d2009-02-25 22:49:59 +00002767 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst);
Chris Lattner8a594482007-11-25 00:24:49 +00002768 else // SSE
Evan Chenga87008d2009-02-25 22:49:59 +00002769 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
Dale Johannesenace16102009-02-03 19:33:06 +00002770 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
Chris Lattner8a594482007-11-25 00:24:49 +00002771}
2772
2773
Evan Cheng39623da2006-04-20 08:58:49 +00002774/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
2775/// that point to V2 point to its first element.
Nate Begeman9008ca62009-04-27 18:41:29 +00002776static SDValue NormalizeMask(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
2777 MVT VT = SVOp->getValueType(0);
Nate Begeman5a5ca152009-04-29 05:20:52 +00002778 unsigned NumElems = VT.getVectorNumElements();
Nate Begeman9008ca62009-04-27 18:41:29 +00002779
Evan Cheng39623da2006-04-20 08:58:49 +00002780 bool Changed = false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002781 SmallVector<int, 8> MaskVec;
2782 SVOp->getMask(MaskVec);
2783
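  // V2 is a splat, so any mask index beyond NumElems (i.e. an element of V2
  // other than its first) can be remapped to NumElems, V2's first element.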
Nate Begeman5a5ca152009-04-29 05:20:52 +00002784 for (unsigned i = 0; i != NumElems; ++i) {
2785 if (MaskVec[i] > (int)NumElems) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002786 MaskVec[i] = NumElems;
2787 Changed = true;
Evan Cheng39623da2006-04-20 08:58:49 +00002788 }
Evan Cheng39623da2006-04-20 08:58:49 +00002789 }
Evan Cheng39623da2006-04-20 08:58:49 +00002790 if (Changed)
Nate Begeman9008ca62009-04-27 18:41:29 +00002791 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(0),
2792 SVOp->getOperand(1), &MaskVec[0]);
2793 return SDValue(SVOp, 0);
Evan Cheng39623da2006-04-20 08:58:49 +00002794}
2795
Evan Cheng017dcc62006-04-21 01:05:10 +00002796/// getMOVL - Returns a vector_shuffle node for a movs{s|d}, movd
2797/// operation of specified width.
Nate Begeman9008ca62009-04-27 18:41:29 +00002798static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, MVT VT, SDValue V1,
2799 SDValue V2) {
2800 unsigned NumElems = VT.getVectorNumElements();
2801 SmallVector<int, 8> Mask;
2802 Mask.push_back(NumElems);
Evan Cheng39623da2006-04-20 08:58:49 +00002803 for (unsigned i = 1; i != NumElems; ++i)
Nate Begeman9008ca62009-04-27 18:41:29 +00002804 Mask.push_back(i);
2805 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
Evan Cheng39623da2006-04-20 08:58:49 +00002806}
2807
Nate Begeman9008ca62009-04-27 18:41:29 +00002808/// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
2809static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, MVT VT, SDValue V1,
2810 SDValue V2) {
2811 unsigned NumElems = VT.getVectorNumElements();
2812 SmallVector<int, 8> Mask;
Evan Chengc575ca22006-04-17 20:43:08 +00002813 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002814 Mask.push_back(i);
2815 Mask.push_back(i + NumElems);
Evan Chengc575ca22006-04-17 20:43:08 +00002816 }
Nate Begeman9008ca62009-04-27 18:41:29 +00002817 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
Evan Chengc575ca22006-04-17 20:43:08 +00002818}
2819
Nate Begeman9008ca62009-04-27 18:41:29 +00002820/// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
2821static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, MVT VT, SDValue V1,
2822 SDValue V2) {
2823 unsigned NumElems = VT.getVectorNumElements();
Evan Cheng39623da2006-04-20 08:58:49 +00002824 unsigned Half = NumElems/2;
Nate Begeman9008ca62009-04-27 18:41:29 +00002825 SmallVector<int, 8> Mask;
Evan Cheng39623da2006-04-20 08:58:49 +00002826 for (unsigned i = 0; i != Half; ++i) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002827 Mask.push_back(i + Half);
2828 Mask.push_back(i + NumElems + Half);
Evan Cheng39623da2006-04-20 08:58:49 +00002829 }
Nate Begeman9008ca62009-04-27 18:41:29 +00002830 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
Chris Lattner62098042008-03-09 01:05:04 +00002831}
2832
Evan Cheng0c0f83f2008-04-05 00:30:36 +00002833/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4f32.
Nate Begeman9008ca62009-04-27 18:41:29 +00002834static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG,
2835 bool HasSSE2) {
2836 if (SV->getValueType(0).getVectorNumElements() <= 4)
2837 return SDValue(SV, 0);
2838
2839 MVT PVT = MVT::v4f32;
2840 MVT VT = SV->getValueType(0);
2841 DebugLoc dl = SV->getDebugLoc();
2842 SDValue V1 = SV->getOperand(0);
2843 int NumElems = VT.getVectorNumElements();
2844 int EltNo = SV->getSplatIndex();
Rafael Espindola15684b22009-04-24 12:40:33 +00002845
Nate Begeman9008ca62009-04-27 18:41:29 +00002846 // Unpack elements to the correct location.
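  // Keep unpacking the half of the vector that holds the splat element against
  // itself; each step doubles the number of adjacent copies of that element, so
  // after the loop the splat value fills the 32-bit lane EltNo of the v4f32 view.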
2847 while (NumElems > 4) {
2848 if (EltNo < NumElems/2) {
2849 V1 = getUnpackl(DAG, dl, VT, V1, V1);
2850 } else {
2851 V1 = getUnpackh(DAG, dl, VT, V1, V1);
2852 EltNo -= NumElems/2;
2853 }
2854 NumElems >>= 1;
2855 }
2856
2857 // Perform the splat.
2858 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
Dale Johannesenace16102009-02-03 19:33:06 +00002859 V1 = DAG.getNode(ISD::BIT_CONVERT, dl, PVT, V1);
Nate Begeman9008ca62009-04-27 18:41:29 +00002860 V1 = DAG.getVectorShuffle(PVT, dl, V1, DAG.getUNDEF(PVT), &SplatMask[0]);
2861 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, V1);
Evan Chengc575ca22006-04-17 20:43:08 +00002862}
2863
Evan Chengba05f722006-04-21 23:03:30 +00002864/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
Chris Lattner8a594482007-11-25 00:24:49 +00002865/// vector and a zero or undef vector. This produces a shuffle where the low
2866/// element of V2 is swizzled into the zero/undef vector, landing at element
2867/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
Dan Gohman475871a2008-07-27 21:46:04 +00002868static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
Evan Chengf0df0312008-05-15 08:39:06 +00002869 bool isZero, bool HasSSE2,
2870 SelectionDAG &DAG) {
Duncan Sands83ec4b62008-06-06 12:08:01 +00002871 MVT VT = V2.getValueType();
Dan Gohman475871a2008-07-27 21:46:04 +00002872 SDValue V1 = isZero
Nate Begeman9008ca62009-04-27 18:41:29 +00002873 ? getZeroVector(VT, HasSSE2, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT);
2874 unsigned NumElems = VT.getVectorNumElements();
2875 SmallVector<int, 16> MaskVec;
Chris Lattner8a594482007-11-25 00:24:49 +00002876 for (unsigned i = 0; i != NumElems; ++i)
Nate Begeman9008ca62009-04-27 18:41:29 +00002877 // If this is the insertion idx, put the low elt of V2 here.
2878 MaskVec.push_back(i == Idx ? NumElems : i);
2879 return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]);
Evan Cheng017dcc62006-04-21 01:05:10 +00002880}
2881
Evan Chengf26ffe92008-05-29 08:22:04 +00002882/// getNumOfConsecutiveZeros - Return the number of consecutive elements of
2883/// a shuffle result that are zero, counting from either the low or high end.
2884static
Nate Begeman9008ca62009-04-27 18:41:29 +00002885unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp, int NumElems,
2886 bool Low, SelectionDAG &DAG) {
Evan Chengf26ffe92008-05-29 08:22:04 +00002887 unsigned NumZeros = 0;
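  // Walk from the chosen end and count elements that are undef in the mask or
  // resolve to a zero scalar; stop at the first element that is neither.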
Nate Begeman9008ca62009-04-27 18:41:29 +00002888 for (int i = 0; i < NumElems; ++i) {
Evan Chengab262272008-06-25 20:52:59 +00002889 unsigned Index = Low ? i : NumElems-i-1;
Nate Begeman9008ca62009-04-27 18:41:29 +00002890 int Idx = SVOp->getMaskElt(Index);
2891 if (Idx < 0) {
Evan Chengf26ffe92008-05-29 08:22:04 +00002892 ++NumZeros;
2893 continue;
2894 }
Nate Begeman9008ca62009-04-27 18:41:29 +00002895 SDValue Elt = DAG.getShuffleScalarElt(SVOp, Index);
Gabor Greifba36cb52008-08-28 21:40:38 +00002896 if (Elt.getNode() && isZeroNode(Elt))
Evan Chengf26ffe92008-05-29 08:22:04 +00002897 ++NumZeros;
2898 else
2899 break;
2900 }
2901 return NumZeros;
2902}
2903
2904/// isVectorShift - Returns true if the shuffle can be implemented as a
2905/// logical left or right shift of a vector.
Nate Begeman9008ca62009-04-27 18:41:29 +00002906/// FIXME: split into pslldqi, psrldqi, palignr variants.
2907static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
Dan Gohman475871a2008-07-27 21:46:04 +00002908 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
Nate Begeman9008ca62009-04-27 18:41:29 +00002909 int NumElems = SVOp->getValueType(0).getVectorNumElements();
Evan Chengf26ffe92008-05-29 08:22:04 +00002910
2911 isLeft = true;
Nate Begeman9008ca62009-04-27 18:41:29 +00002912 unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, true, DAG);
Evan Chengf26ffe92008-05-29 08:22:04 +00002913 if (!NumZeros) {
2914 isLeft = false;
Nate Begeman9008ca62009-04-27 18:41:29 +00002915 NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, false, DAG);
Evan Chengf26ffe92008-05-29 08:22:04 +00002916 if (!NumZeros)
2917 return false;
2918 }
Evan Chengf26ffe92008-05-29 08:22:04 +00002919 bool SeenV1 = false;
2920 bool SeenV2 = false;
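  // Past the zero run, the remaining elements must be a consecutive, in-order
  // run taken entirely from one of the two source vectors.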
Nate Begeman9008ca62009-04-27 18:41:29 +00002921 for (int i = NumZeros; i < NumElems; ++i) {
2922 int Val = isLeft ? (i - NumZeros) : i;
2923 int Idx = SVOp->getMaskElt(isLeft ? i : (i - NumZeros));
2924 if (Idx < 0)
Evan Chengf26ffe92008-05-29 08:22:04 +00002925 continue;
Nate Begeman9008ca62009-04-27 18:41:29 +00002926 if (Idx < NumElems)
Evan Chengf26ffe92008-05-29 08:22:04 +00002927 SeenV1 = true;
2928 else {
Nate Begeman9008ca62009-04-27 18:41:29 +00002929 Idx -= NumElems;
Evan Chengf26ffe92008-05-29 08:22:04 +00002930 SeenV2 = true;
2931 }
Nate Begeman9008ca62009-04-27 18:41:29 +00002932 if (Idx != Val)
Evan Chengf26ffe92008-05-29 08:22:04 +00002933 return false;
2934 }
2935 if (SeenV1 && SeenV2)
2936 return false;
2937
Nate Begeman9008ca62009-04-27 18:41:29 +00002938 ShVal = SeenV1 ? SVOp->getOperand(0) : SVOp->getOperand(1);
Evan Chengf26ffe92008-05-29 08:22:04 +00002939 ShAmt = NumZeros;
2940 return true;
2941}
2942
2943
Evan Chengc78d3b42006-04-24 18:01:45 +00002944/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
2945///
Dan Gohman475871a2008-07-27 21:46:04 +00002946static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
Evan Chengc78d3b42006-04-24 18:01:45 +00002947 unsigned NumNonZero, unsigned NumZero,
Evan Cheng25ab6902006-09-08 06:48:29 +00002948 SelectionDAG &DAG, TargetLowering &TLI) {
Evan Chengc78d3b42006-04-24 18:01:45 +00002949 if (NumNonZero > 8)
Dan Gohman475871a2008-07-27 21:46:04 +00002950 return SDValue();
Evan Chengc78d3b42006-04-24 18:01:45 +00002951
Dale Johannesen6f38cb62009-02-07 19:59:05 +00002952 DebugLoc dl = Op.getDebugLoc();
Dan Gohman475871a2008-07-27 21:46:04 +00002953 SDValue V(0, 0);
Evan Chengc78d3b42006-04-24 18:01:45 +00002954 bool First = true;
2955 for (unsigned i = 0; i < 16; ++i) {
2956 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
2957 if (ThisIsNonZero && First) {
2958 if (NumZero)
Dale Johannesenace16102009-02-03 19:33:06 +00002959 V = getZeroVector(MVT::v8i16, true, DAG, dl);
Evan Chengc78d3b42006-04-24 18:01:45 +00002960 else
Dale Johannesene8d72302009-02-06 23:05:02 +00002961 V = DAG.getUNDEF(MVT::v8i16);
Evan Chengc78d3b42006-04-24 18:01:45 +00002962 First = false;
2963 }
2964
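    // Bytes are committed in pairs: at each odd index, the non-zero byte(s) of
    // the pair are zero-extended to i16 (the odd byte shifted left by 8), OR'd
    // together if both are non-zero, and inserted as word i/2 of the v8i16 vector.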
2965 if ((i & 1) != 0) {
Dan Gohman475871a2008-07-27 21:46:04 +00002966 SDValue ThisElt(0, 0), LastElt(0, 0);
Evan Chengc78d3b42006-04-24 18:01:45 +00002967 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
2968 if (LastIsNonZero) {
Scott Michelfdc40a02009-02-17 22:15:04 +00002969 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
Dale Johannesenace16102009-02-03 19:33:06 +00002970 MVT::i16, Op.getOperand(i-1));
Evan Chengc78d3b42006-04-24 18:01:45 +00002971 }
2972 if (ThisIsNonZero) {
Dale Johannesenace16102009-02-03 19:33:06 +00002973 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
2974 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
Evan Chengc78d3b42006-04-24 18:01:45 +00002975 ThisElt, DAG.getConstant(8, MVT::i8));
2976 if (LastIsNonZero)
Dale Johannesenace16102009-02-03 19:33:06 +00002977 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
Evan Chengc78d3b42006-04-24 18:01:45 +00002978 } else
2979 ThisElt = LastElt;
2980
Gabor Greifba36cb52008-08-28 21:40:38 +00002981 if (ThisElt.getNode())
Dale Johannesenace16102009-02-03 19:33:06 +00002982 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
Chris Lattner0bd48932008-01-17 07:00:52 +00002983 DAG.getIntPtrConstant(i/2));
Evan Chengc78d3b42006-04-24 18:01:45 +00002984 }
2985 }
2986
Dale Johannesenace16102009-02-03 19:33:06 +00002987 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V);
Evan Chengc78d3b42006-04-24 18:01:45 +00002988}
2989
Bill Wendlinga348c562007-03-22 18:42:45 +00002990/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
Evan Chengc78d3b42006-04-24 18:01:45 +00002991///
Dan Gohman475871a2008-07-27 21:46:04 +00002992static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
Evan Chengc78d3b42006-04-24 18:01:45 +00002993 unsigned NumNonZero, unsigned NumZero,
Evan Cheng25ab6902006-09-08 06:48:29 +00002994 SelectionDAG &DAG, TargetLowering &TLI) {
Evan Chengc78d3b42006-04-24 18:01:45 +00002995 if (NumNonZero > 4)
Dan Gohman475871a2008-07-27 21:46:04 +00002996 return SDValue();
Evan Chengc78d3b42006-04-24 18:01:45 +00002997
Dale Johannesen6f38cb62009-02-07 19:59:05 +00002998 DebugLoc dl = Op.getDebugLoc();
Dan Gohman475871a2008-07-27 21:46:04 +00002999 SDValue V(0, 0);
Evan Chengc78d3b42006-04-24 18:01:45 +00003000 bool First = true;
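  // Insert each non-zero element individually; the first insertion seeds V with a
  // zero vector when any zero elements are present, otherwise with undef.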
3001 for (unsigned i = 0; i < 8; ++i) {
3002 bool isNonZero = (NonZeros & (1 << i)) != 0;
3003 if (isNonZero) {
3004 if (First) {
3005 if (NumZero)
Dale Johannesenace16102009-02-03 19:33:06 +00003006 V = getZeroVector(MVT::v8i16, true, DAG, dl);
Evan Chengc78d3b42006-04-24 18:01:45 +00003007 else
Dale Johannesene8d72302009-02-06 23:05:02 +00003008 V = DAG.getUNDEF(MVT::v8i16);
Evan Chengc78d3b42006-04-24 18:01:45 +00003009 First = false;
3010 }
Scott Michelfdc40a02009-02-17 22:15:04 +00003011 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
Dale Johannesenace16102009-02-03 19:33:06 +00003012 MVT::v8i16, V, Op.getOperand(i),
Chris Lattner0bd48932008-01-17 07:00:52 +00003013 DAG.getIntPtrConstant(i));
Evan Chengc78d3b42006-04-24 18:01:45 +00003014 }
3015 }
3016
3017 return V;
3018}
3019
Evan Chengf26ffe92008-05-29 08:22:04 +00003020/// getVShift - Return a vector logical shift node.
3021///
Dan Gohman475871a2008-07-27 21:46:04 +00003022static SDValue getVShift(bool isLeft, MVT VT, SDValue SrcOp,
Nate Begeman9008ca62009-04-27 18:41:29 +00003023 unsigned NumBits, SelectionDAG &DAG,
3024 const TargetLowering &TLI, DebugLoc dl) {
Duncan Sands83ec4b62008-06-06 12:08:01 +00003025 bool isMMX = VT.getSizeInBits() == 64;
3026 MVT ShVT = isMMX ? MVT::v1i64 : MVT::v2i64;
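  // Do the shift in a 64-bit-element integer type (v1i64 for MMX, v2i64 for
  // SSE), then bitcast the result back to the original vector type VT.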
Evan Chengf26ffe92008-05-29 08:22:04 +00003027 unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL;
Dale Johannesenace16102009-02-03 19:33:06 +00003028 SrcOp = DAG.getNode(ISD::BIT_CONVERT, dl, ShVT, SrcOp);
3029 return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
3030 DAG.getNode(Opc, dl, ShVT, SrcOp,
Gabor Greif327ef032008-08-28 23:19:51 +00003031 DAG.getConstant(NumBits, TLI.getShiftAmountTy())));
Evan Chengf26ffe92008-05-29 08:22:04 +00003032}
3033
Dan Gohman475871a2008-07-27 21:46:04 +00003034SDValue
3035X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
Dale Johannesen6f38cb62009-02-07 19:59:05 +00003036 DebugLoc dl = Op.getDebugLoc();
Chris Lattner8a594482007-11-25 00:24:49 +00003037 // All zeros are handled with pxor, all ones are handled with pcmpeqd.
Gabor Greif327ef032008-08-28 23:19:51 +00003038 if (ISD::isBuildVectorAllZeros(Op.getNode())
3039 || ISD::isBuildVectorAllOnes(Op.getNode())) {
Chris Lattner8a594482007-11-25 00:24:49 +00003040 // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to
3041 // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are
3042 // eliminated on x86-32 hosts.
3043 if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32)
3044 return Op;
Evan Cheng0db9fe62006-04-25 20:13:52 +00003045
Gabor Greifba36cb52008-08-28 21:40:38 +00003046 if (ISD::isBuildVectorAllOnes(Op.getNode()))
Dale Johannesenace16102009-02-03 19:33:06 +00003047 return getOnesVector(Op.getValueType(), DAG, dl);
3048 return getZeroVector(Op.getValueType(), Subtarget->hasSSE2(), DAG, dl);
Chris Lattner8a594482007-11-25 00:24:49 +00003049 }
Evan Cheng0db9fe62006-04-25 20:13:52 +00003050
Duncan Sands83ec4b62008-06-06 12:08:01 +00003051 MVT VT = Op.getValueType();
3052 MVT EVT = VT.getVectorElementType();
3053 unsigned EVTBits = EVT.getSizeInBits();
Evan Cheng0db9fe62006-04-25 20:13:52 +00003054
3055 unsigned NumElems = Op.getNumOperands();
3056 unsigned NumZero = 0;
3057 unsigned NumNonZero = 0;
3058 unsigned NonZeros = 0;
Chris Lattnerc9517fb2008-03-08 22:48:29 +00003059 bool IsAllConstants = true;
Dan Gohman475871a2008-07-27 21:46:04 +00003060 SmallSet<SDValue, 8> Values;
Evan Cheng0db9fe62006-04-25 20:13:52 +00003061 for (unsigned i = 0; i < NumElems; ++i) {
Dan Gohman475871a2008-07-27 21:46:04 +00003062 SDValue Elt = Op.getOperand(i);
Evan Chengdb2d5242007-12-12 06:45:40 +00003063 if (Elt.getOpcode() == ISD::UNDEF)
3064 continue;
3065 Values.insert(Elt);
3066 if (Elt.getOpcode() != ISD::Constant &&
3067 Elt.getOpcode() != ISD::ConstantFP)
Chris Lattnerc9517fb2008-03-08 22:48:29 +00003068 IsAllConstants = false;
Evan Chengdb2d5242007-12-12 06:45:40 +00003069 if (isZeroNode(Elt))
3070 NumZero++;
3071 else {
3072 NonZeros |= (1 << i);
3073 NumNonZero++;
Evan Cheng0db9fe62006-04-25 20:13:52 +00003074 }
3075 }
3076
Dan Gohman7f321562007-06-25 16:23:39 +00003077 if (NumNonZero == 0) {
Chris Lattner8a594482007-11-25 00:24:49 +00003078 // All-undef vector. Return an UNDEF. All zero vectors were handled above.
Dale Johannesene8d72302009-02-06 23:05:02 +00003079 return DAG.getUNDEF(VT);
Dan Gohman7f321562007-06-25 16:23:39 +00003080 }
Evan Cheng0db9fe62006-04-25 20:13:52 +00003081
Chris Lattner67f453a2008-03-09 05:42:06 +00003082 // Special case for single non-zero, non-undef, element.
Eli Friedman10415532009-06-06 06:05:10 +00003083 if (NumNonZero == 1) {
Evan Cheng0db9fe62006-04-25 20:13:52 +00003084 unsigned Idx = CountTrailingZeros_32(NonZeros);
Dan Gohman475871a2008-07-27 21:46:04 +00003085 SDValue Item = Op.getOperand(Idx);
Scott Michelfdc40a02009-02-17 22:15:04 +00003086
Chris Lattner62098042008-03-09 01:05:04 +00003087 // If this is an insertion of an i64 value on x86-32, and if the top bits of
3088 // the value are obviously zero, truncate the value to i32 and do the
3089 // insertion that way. Only do this if the value is non-constant or if the
3090 // value is a constant being inserted into element 0. It is cheaper to do
3091 // a constant pool load than it is to do a movd + shuffle.
3092 if (EVT == MVT::i64 && !Subtarget->is64Bit() &&
3093 (!IsAllConstants || Idx == 0)) {
3094 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
3095 // Handle MMX and SSE both.
Duncan Sands83ec4b62008-06-06 12:08:01 +00003096 MVT VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32;
3097 unsigned VecElts = VT == MVT::v2i64 ? 4 : 2;
Scott Michelfdc40a02009-02-17 22:15:04 +00003098
Chris Lattner62098042008-03-09 01:05:04 +00003099 // Truncate the value (which may itself be a constant) to i32, and
3100 // convert it to a vector with movd (S2V+shuffle to zero extend).
Dale Johannesenace16102009-02-03 19:33:06 +00003101 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
3102 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
Evan Chengf0df0312008-05-15 08:39:06 +00003103 Item = getShuffleVectorZeroOrUndef(Item, 0, true,
3104 Subtarget->hasSSE2(), DAG);
Scott Michelfdc40a02009-02-17 22:15:04 +00003105
Chris Lattner62098042008-03-09 01:05:04 +00003106 // Now we have our 32-bit value zero extended in the low element of
3107 // a vector. If Idx != 0, swizzle it into place.
3108 if (Idx != 0) {
Nate Begeman9008ca62009-04-27 18:41:29 +00003109 SmallVector<int, 4> Mask;
3110 Mask.push_back(Idx);
3111 for (unsigned i = 1; i != VecElts; ++i)
3112 Mask.push_back(i);
3113 Item = DAG.getVectorShuffle(VecVT, dl, Item,
3114 DAG.getUNDEF(Item.getValueType()),
3115 &Mask[0]);
Chris Lattner62098042008-03-09 01:05:04 +00003116 }
Dale Johannesenace16102009-02-03 19:33:06 +00003117 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Item);
Chris Lattner62098042008-03-09 01:05:04 +00003118 }
3119 }
Scott Michelfdc40a02009-02-17 22:15:04 +00003120
Chris Lattner19f79692008-03-08 22:59:52 +00003121 // If we have a constant or non-constant insertion into the low element of
3122 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
3123 // the rest of the elements. This will be matched as movd/movq/movss/movsd
Eli Friedman10415532009-06-06 06:05:10 +00003124 // depending on what the source datatype is.
3125 if (Idx == 0) {
3126 if (NumZero == 0) {
3127 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
3128 } else if (EVT == MVT::i32 || EVT == MVT::f32 || EVT == MVT::f64 ||
3129 (EVT == MVT::i64 && Subtarget->is64Bit())) {
3130 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
3131 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
3132 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget->hasSSE2(),
3133 DAG);
3134 } else if (EVT == MVT::i16 || EVT == MVT::i8) {
3135 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
3136 MVT MiddleVT = VT.getSizeInBits() == 64 ? MVT::v2i32 : MVT::v4i32;
3137 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MiddleVT, Item);
3138 Item = getShuffleVectorZeroOrUndef(Item, 0, true,
3139 Subtarget->hasSSE2(), DAG);
3140 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Item);
3141 }
Chris Lattnerc9517fb2008-03-08 22:48:29 +00003142 }
Evan Chengf26ffe92008-05-29 08:22:04 +00003143
3144 // Is it a vector logical left shift?
3145 if (NumElems == 2 && Idx == 1 &&
3146 isZeroNode(Op.getOperand(0)) && !isZeroNode(Op.getOperand(1))) {
Duncan Sands83ec4b62008-06-06 12:08:01 +00003147 unsigned NumBits = VT.getSizeInBits();
Evan Chengf26ffe92008-05-29 08:22:04 +00003148 return getVShift(true, VT,
Scott Michelfdc40a02009-02-17 22:15:04 +00003149 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
Dale Johannesenb300d2a2009-02-07 00:55:49 +00003150 VT, Op.getOperand(1)),
Dale Johannesenace16102009-02-03 19:33:06 +00003151 NumBits/2, DAG, *this, dl);
Evan Chengf26ffe92008-05-29 08:22:04 +00003152 }
Scott Michelfdc40a02009-02-17 22:15:04 +00003153
Chris Lattnerc9517fb2008-03-08 22:48:29 +00003154 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
Dan Gohman475871a2008-07-27 21:46:04 +00003155 return SDValue();
Evan Cheng0db9fe62006-04-25 20:13:52 +00003156
Chris Lattner19f79692008-03-08 22:59:52 +00003157 // Otherwise, if this is a vector with i32 or f32 elements, and the element
3158 // is a non-constant being inserted into an element other than the low one,
3159 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
3160 // movd/movss) to move this into the low element, then shuffle it into
3161 // place.
Evan Cheng0db9fe62006-04-25 20:13:52 +00003162 if (EVTBits == 32) {
Dale Johannesenace16102009-02-03 19:33:06 +00003163 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
Scott Michelfdc40a02009-02-17 22:15:04 +00003164
Evan Cheng0db9fe62006-04-25 20:13:52 +00003165 // Turn it into a shuffle of zero and zero-extended scalar to vector.
Evan Chengf0df0312008-05-15 08:39:06 +00003166 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0,
3167 Subtarget->hasSSE2(), DAG);
Nate Begeman9008ca62009-04-27 18:41:29 +00003168 SmallVector<int, 8> MaskVec;
Evan Cheng0db9fe62006-04-25 20:13:52 +00003169 for (unsigned i = 0; i < NumElems; i++)
Nate Begeman9008ca62009-04-27 18:41:29 +00003170 MaskVec.push_back(i == Idx ? 0 : 1);
3171 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
Evan Cheng0db9fe62006-04-25 20:13:52 +00003172 }
3173 }
3174
Chris Lattner67f453a2008-03-09 05:42:06 +00003175 // Splat is obviously ok. Let legalizer expand it to a shuffle.
3176 if (Values.size() == 1)
Dan Gohman475871a2008-07-27 21:46:04 +00003177 return SDValue();
Scott Michelfdc40a02009-02-17 22:15:04 +00003178
Dan Gohmana3941172007-07-24 22:55:08 +00003179 // A vector full of immediates; various special cases are already
3180 // handled, so this is best done with a single constant-pool load.
Chris Lattnerc9517fb2008-03-08 22:48:29 +00003181 if (IsAllConstants)
Dan Gohman475871a2008-07-27 21:46:04 +00003182 return SDValue();
Dan Gohmana3941172007-07-24 22:55:08 +00003183
Bill Wendling2f9bb1a2007-04-24 21:16:55 +00003184 // Let legalizer expand 2-wide build_vectors.
Evan Cheng7e2ff772008-05-08 00:57:18 +00003185 if (EVTBits == 64) {
3186 if (NumNonZero == 1) {
3187 // One half is zero or undef.
3188 unsigned Idx = CountTrailingZeros_32(NonZeros);
Dale Johannesenace16102009-02-03 19:33:06 +00003189 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
Evan Cheng7e2ff772008-05-08 00:57:18 +00003190 Op.getOperand(Idx));
Evan Chengf0df0312008-05-15 08:39:06 +00003191 return getShuffleVectorZeroOrUndef(V2, Idx, true,
3192 Subtarget->hasSSE2(), DAG);
Evan Cheng7e2ff772008-05-08 00:57:18 +00003193 }
Dan Gohman475871a2008-07-27 21:46:04 +00003194 return SDValue();
Evan Cheng7e2ff772008-05-08 00:57:18 +00003195 }
Evan Cheng0db9fe62006-04-25 20:13:52 +00003196
3197 // If element VT is < 32 bits, convert it to inserts into a zero vector.
Bill Wendling826f36f2007-03-28 00:57:11 +00003198 if (EVTBits == 8 && NumElems == 16) {
Dan Gohman475871a2008-07-27 21:46:04 +00003199 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
Evan Cheng25ab6902006-09-08 06:48:29 +00003200 *this);
Gabor Greifba36cb52008-08-28 21:40:38 +00003201 if (V.getNode()) return V;
Evan Cheng0db9fe62006-04-25 20:13:52 +00003202 }
3203
Bill Wendling826f36f2007-03-28 00:57:11 +00003204 if (EVTBits == 16 && NumElems == 8) {
Dan Gohman475871a2008-07-27 21:46:04 +00003205 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
Evan Cheng25ab6902006-09-08 06:48:29 +00003206 *this);
Gabor Greifba36cb52008-08-28 21:40:38 +00003207 if (V.getNode()) return V;
Evan Cheng0db9fe62006-04-25 20:13:52 +00003208 }
3209
3210 // If element VT is == 32 bits, turn it into a number of shuffles.
Dan Gohman475871a2008-07-27 21:46:04 +00003211 SmallVector<SDValue, 8> V;
Chris Lattner5a88b832007-02-25 07:10:00 +00003212 V.resize(NumElems);
Evan Cheng0db9fe62006-04-25 20:13:52 +00003213 if (NumElems == 4 && NumZero > 0) {
3214 for (unsigned i = 0; i < 4; ++i) {
3215 bool isZero = !(NonZeros & (1 << i));
3216 if (isZero)
Dale Johannesenace16102009-02-03 19:33:06 +00003217 V[i] = getZeroVector(VT, Subtarget->hasSSE2(), DAG, dl);
Evan Cheng0db9fe62006-04-25 20:13:52 +00003218 else
Dale Johannesenace16102009-02-03 19:33:06 +00003219 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
Evan Cheng0db9fe62006-04-25 20:13:52 +00003220 }
3221
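    // Combine the four single-element vectors pairwise. For each pair, the two
    // NonZeros bits select the strategy: 0 -> reuse the zero vector, 1 or 2 ->
    // MOVL the non-zero value into the low element of the zero vector, 3 ->
    // unpack the two values together.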
3222 for (unsigned i = 0; i < 2; ++i) {
3223 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
3224 default: break;
3225 case 0:
3226 V[i] = V[i*2]; // Must be a zero vector.
3227 break;
3228 case 1:
Nate Begeman9008ca62009-04-27 18:41:29 +00003229 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
Evan Cheng0db9fe62006-04-25 20:13:52 +00003230 break;
3231 case 2:
Nate Begeman9008ca62009-04-27 18:41:29 +00003232 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
Evan Cheng0db9fe62006-04-25 20:13:52 +00003233 break;
3234 case 3:
Nate Begeman9008ca62009-04-27 18:41:29 +00003235 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
Evan Cheng0db9fe62006-04-25 20:13:52 +00003236 break;
3237 }
3238 }
3239
Nate Begeman9008ca62009-04-27 18:41:29 +00003240 SmallVector<int, 8> MaskVec;
Evan Cheng0db9fe62006-04-25 20:13:52 +00003241 bool Reverse = (NonZeros & 0x3) == 2;
3242 for (unsigned i = 0; i < 2; ++i)
Nate Begeman9008ca62009-04-27 18:41:29 +00003243 MaskVec.push_back(Reverse ? 1-i : i);
Evan Cheng0db9fe62006-04-25 20:13:52 +00003244 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2;
3245 for (unsigned i = 0; i < 2; ++i)
Nate Begeman9008ca62009-04-27 18:41:29 +00003246 MaskVec.push_back(Reverse ? 1-i+NumElems : i+NumElems);
3247 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
Evan Cheng0db9fe62006-04-25 20:13:52 +00003248 }
3249
3250 if (Values.size() > 2) {
Nate Begeman9008ca62009-04-27 18:41:29 +00003251 // If we have SSE 4.1, expand into a number of inserts unless the number of
3252 // values to be inserted is equal to the number of elements, in which case
3253 // use the unpack code below in the hopes of matching the consecutive elts
3254 // load merge pattern for shuffles.
3255 // FIXME: We could probably just check that here directly.
3256 if (Values.size() < NumElems && VT.getSizeInBits() == 128 &&
3257 getSubtarget()->hasSSE41()) {
3258 V[0] = DAG.getUNDEF(VT);
3259 for (unsigned i = 0; i < NumElems; ++i)
3260 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
3261 V[0] = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V[0],
3262 Op.getOperand(i), DAG.getIntPtrConstant(i));
3263 return V[0];
3264 }
Evan Cheng0db9fe62006-04-25 20:13:52 +00003265 // Expand into a number of unpckl*.
3266 // e.g. for v4f32
3267 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
3268 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
3269 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
Evan Cheng0db9fe62006-04-25 20:13:52 +00003270 for (unsigned i = 0; i < NumElems; ++i)
Dale Johannesenace16102009-02-03 19:33:06 +00003271 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
Evan Cheng0db9fe62006-04-25 20:13:52 +00003272 NumElems >>= 1;
3273 while (NumElems != 0) {
3274 for (unsigned i = 0; i < NumElems; ++i)
Nate Begeman9008ca62009-04-27 18:41:29 +00003275 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + NumElems]);
Evan Cheng0db9fe62006-04-25 20:13:52 +00003276 NumElems >>= 1;
3277 }
3278 return V[0];
3279 }
3280
Dan Gohman475871a2008-07-27 21:46:04 +00003281 return SDValue();
Evan Cheng0db9fe62006-04-25 20:13:52 +00003282}
3283
Nate Begemanb9a47b82009-02-23 08:49:38 +00003284// v8i16 shuffles - Prefer shuffles in the following order:
3285// 1. [all] pshuflw, pshufhw, optional move
3286// 2. [ssse3] 1 x pshufb
3287// 3. [ssse3] 2 x pshufb + 1 x por
3288// 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
Evan Cheng8a86c3f2007-12-07 08:07:39 +00003289static
Nate Begeman9008ca62009-04-27 18:41:29 +00003290SDValue LowerVECTOR_SHUFFLEv8i16(ShuffleVectorSDNode *SVOp,
3291 SelectionDAG &DAG, X86TargetLowering &TLI) {
3292 SDValue V1 = SVOp->getOperand(0);
3293 SDValue V2 = SVOp->getOperand(1);
3294 DebugLoc dl = SVOp->getDebugLoc();
Nate Begemanb9a47b82009-02-23 08:49:38 +00003295 SmallVector<int, 8> MaskVals;
Evan Cheng14b32e12007-12-11 01:46:18 +00003296
Nate Begemanb9a47b82009-02-23 08:49:38 +00003297 // Determine if more than 1 of the words in each of the low and high quadwords
3298 // of the result come from the same quadword of one of the two inputs. Undef
3299 // mask values count as coming from any quadword, for better codegen.
3300 SmallVector<unsigned, 4> LoQuad(4);
3301 SmallVector<unsigned, 4> HiQuad(4);
3302 BitVector InputQuads(4);
3303 for (unsigned i = 0; i < 8; ++i) {
3304 SmallVectorImpl<unsigned> &Quad = i < 4 ? LoQuad : HiQuad;
Nate Begeman9008ca62009-04-27 18:41:29 +00003305 int EltIdx = SVOp->getMaskElt(i);
Nate Begemanb9a47b82009-02-23 08:49:38 +00003306 MaskVals.push_back(EltIdx);
3307 if (EltIdx < 0) {
3308 ++Quad[0];
3309 ++Quad[1];
3310 ++Quad[2];
3311 ++Quad[3];
Evan Cheng14b32e12007-12-11 01:46:18 +00003312 continue;
Nate Begemanb9a47b82009-02-23 08:49:38 +00003313 }
3314 ++Quad[EltIdx / 4];
3315 InputQuads.set(EltIdx / 4);
Evan Cheng14b32e12007-12-11 01:46:18 +00003316 }
Bill Wendlinge85dc492008-08-21 22:35:37 +00003317
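  // Choose the input quadword that supplies the most words of the result's low
  // half; a quad must supply at least two words to be chosen, otherwise
  // BestLoQuad stays -1. BestHiQuad is chosen the same way for the high half.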
Nate Begemanb9a47b82009-02-23 08:49:38 +00003318 int BestLoQuad = -1;
Evan Cheng14b32e12007-12-11 01:46:18 +00003319 unsigned MaxQuad = 1;
3320 for (unsigned i = 0; i < 4; ++i) {
Nate Begemanb9a47b82009-02-23 08:49:38 +00003321 if (LoQuad[i] > MaxQuad) {
3322 BestLoQuad = i;
3323 MaxQuad = LoQuad[i];
Evan Cheng14b32e12007-12-11 01:46:18 +00003324 }
Evan Cheng8a86c3f2007-12-07 08:07:39 +00003325 }
3326
Nate Begemanb9a47b82009-02-23 08:49:38 +00003327 int BestHiQuad = -1;
Evan Cheng14b32e12007-12-11 01:46:18 +00003328 MaxQuad = 1;
3329 for (unsigned i = 0; i < 4; ++i) {
Nate Begemanb9a47b82009-02-23 08:49:38 +00003330 if (HiQuad[i] > MaxQuad) {
3331 BestHiQuad = i;
3332 MaxQuad = HiQuad[i];
Evan Cheng14b32e12007-12-11 01:46:18 +00003333 }
3334 }
3335
Nate Begemanb9a47b82009-02-23 08:49:38 +00003336 // For SSSE3, if all 8 words of the result come from only 1 quadword of each
3337 // of the two input vectors, shuffle them into one input vector so only a
3338 // single pshufb instruction is necessary. If there are more than 2 input
3339 // quads, disable the next transformation since it does not help SSSE3.
3340 bool V1Used = InputQuads[0] || InputQuads[1];
3341 bool V2Used = InputQuads[2] || InputQuads[3];
3342 if (TLI.getSubtarget()->hasSSSE3()) {
3343 if (InputQuads.count() == 2 && V1Used && V2Used) {
3344 BestLoQuad = InputQuads.find_first();
3345 BestHiQuad = InputQuads.find_next(BestLoQuad);
3346 }
3347 if (InputQuads.count() > 2) {
3348 BestLoQuad = -1;
3349 BestHiQuad = -1;
3350 }
3351 }
Bill Wendlinge85dc492008-08-21 22:35:37 +00003352
Nate Begemanb9a47b82009-02-23 08:49:38 +00003353 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
3354 // the shuffle mask. If a quad is scored as -1, that means that it contains
3355 // words from all 4 input quadwords.
3356 SDValue NewV;
3357 if (BestLoQuad >= 0 || BestHiQuad >= 0) {
Nate Begeman9008ca62009-04-27 18:41:29 +00003358 SmallVector<int, 8> MaskV;
3359 MaskV.push_back(BestLoQuad < 0 ? 0 : BestLoQuad);
3360 MaskV.push_back(BestHiQuad < 0 ? 1 : BestHiQuad);
3361 NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
3362 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, V1),
3363 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, V2), &MaskV[0]);
Dale Johannesenace16102009-02-03 19:33:06 +00003364 NewV = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, NewV);
Evan Cheng14b32e12007-12-11 01:46:18 +00003365
Nate Begemanb9a47b82009-02-23 08:49:38 +00003366 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
3367 // source words for the shuffle, to aid later transformations.
3368 bool AllWordsInNewV = true;
Mon P Wang37b9a192009-03-11 06:35:11 +00003369 bool InOrder[2] = { true, true };
Evan Cheng14b32e12007-12-11 01:46:18 +00003370 for (unsigned i = 0; i != 8; ++i) {
Nate Begemanb9a47b82009-02-23 08:49:38 +00003371 int idx = MaskVals[i];
Mon P Wang37b9a192009-03-11 06:35:11 +00003372 if (idx != (int)i)
3373 InOrder[i/4] = false;
Nate Begemanb9a47b82009-02-23 08:49:38 +00003374 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
Evan Cheng14b32e12007-12-11 01:46:18 +00003375 continue;
Nate Begemanb9a47b82009-02-23 08:49:38 +00003376 AllWordsInNewV = false;
3377 break;
Evan Cheng14b32e12007-12-11 01:46:18 +00003378 }
Bill Wendlinge85dc492008-08-21 22:35:37 +00003379
Nate Begemanb9a47b82009-02-23 08:49:38 +00003380 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
3381 if (AllWordsInNewV) {
3382 for (int i = 0; i != 8; ++i) {
3383 int idx = MaskVals[i];
3384 if (idx < 0)
Evan Cheng14b32e12007-12-11 01:46:18 +00003385 continue;
Nate Begemanb9a47b82009-02-23 08:49:38 +00003386 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
3387 if ((idx != i) && idx < 4)
3388 pshufhw = false;
3389 if ((idx != i) && idx > 3)
3390 pshuflw = false;
Evan Cheng14b32e12007-12-11 01:46:18 +00003391 }
Nate Begemanb9a47b82009-02-23 08:49:38 +00003392 V1 = NewV;
3393 V2Used = false;
3394 BestLoQuad = 0;
3395 BestHiQuad = 1;
Evan Cheng8a86c3f2007-12-07 08:07:39 +00003396 }
Evan Cheng14b32e12007-12-11 01:46:18 +00003397
Nate Begemanb9a47b82009-02-23 08:49:38 +00003398 // If we've eliminated the use of V2, and the new mask is a pshuflw or
3399 // pshufhw, that's as cheap as it gets. Return the new shuffle.
Mon P Wang37b9a192009-03-11 06:35:11 +00003400 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
Nate Begeman9008ca62009-04-27 18:41:29 +00003401 return DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
3402 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
Evan Cheng14b32e12007-12-11 01:46:18 +00003403 }
Evan Cheng14b32e12007-12-11 01:46:18 +00003404 }
Nate Begemanb9a47b82009-02-23 08:49:38 +00003405
3406 // If we have SSSE3, and all words of the result are from 1 input vector,
3407 // case 2 is generated, otherwise case 3 is generated. If no SSSE3
3408 // is present, fall back to case 4.
3409 if (TLI.getSubtarget()->hasSSSE3()) {
3410 SmallVector<SDValue,16> pshufbMask;
3411
3412 // If we have elements from both input vectors, set the high bit of the
3413 // shuffle mask element to zero out elements that come from V2 in the V1
3414 // mask, and elements that come from V1 in the V2 mask, so that the two
3415 // results can be OR'd together.
3416 bool TwoInputs = V1Used && V2Used;
3417 for (unsigned i = 0; i != 8; ++i) {
3418 int EltIdx = MaskVals[i] * 2;
3419 if (TwoInputs && (EltIdx >= 16)) {
3420 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
3421 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
3422 continue;
3423 }
3424 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
3425 pshufbMask.push_back(DAG.getConstant(EltIdx+1, MVT::i8));
3426 }
3427 V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V1);
3428 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
Evan Chenga87008d2009-02-25 22:49:59 +00003429 DAG.getNode(ISD::BUILD_VECTOR, dl,
3430 MVT::v16i8, &pshufbMask[0], 16));
Nate Begemanb9a47b82009-02-23 08:49:38 +00003431 if (!TwoInputs)
3432 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1);
3433
3434 // Calculate the shuffle mask for the second input, shuffle it, and
3435 // OR it with the first shuffled input.
3436 pshufbMask.clear();
3437 for (unsigned i = 0; i != 8; ++i) {
3438 int EltIdx = MaskVals[i] * 2;
3439 if (EltIdx < 16) {
3440 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
3441 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
3442 continue;
3443 }
3444 pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8));
3445 pshufbMask.push_back(DAG.getConstant(EltIdx - 15, MVT::i8));
3446 }
3447 V2 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V2);
3448 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
Evan Chenga87008d2009-02-25 22:49:59 +00003449 DAG.getNode(ISD::BUILD_VECTOR, dl,
3450 MVT::v16i8, &pshufbMask[0], 16));
Nate Begemanb9a47b82009-02-23 08:49:38 +00003451 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
3452 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1);
3453 }
3454
3455 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
3456 // and update MaskVals with new element order.
3457 BitVector InOrder(8);
3458 if (BestLoQuad >= 0) {
Nate Begeman9008ca62009-04-27 18:41:29 +00003459 SmallVector<int, 8> MaskV;
Nate Begemanb9a47b82009-02-23 08:49:38 +00003460 for (int i = 0; i != 4; ++i) {
3461 int idx = MaskVals[i];
3462 if (idx < 0) {
Nate Begeman9008ca62009-04-27 18:41:29 +00003463 MaskV.push_back(-1);
Nate Begemanb9a47b82009-02-23 08:49:38 +00003464 InOrder.set(i);
3465 } else if ((idx / 4) == BestLoQuad) {
Nate Begeman9008ca62009-04-27 18:41:29 +00003466 MaskV.push_back(idx & 3);
Nate Begemanb9a47b82009-02-23 08:49:38 +00003467 InOrder.set(i);
3468 } else {
Nate Begeman9008ca62009-04-27 18:41:29 +00003469 MaskV.push_back(-1);
Nate Begemanb9a47b82009-02-23 08:49:38 +00003470 }
3471 }
3472 for (unsigned i = 4; i != 8; ++i)
Nate Begeman9008ca62009-04-27 18:41:29 +00003473 MaskV.push_back(i);
3474 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
3475 &MaskV[0]);
Nate Begemanb9a47b82009-02-23 08:49:38 +00003476 }
3477
3478 // If BestHiQuad >= 0, generate a pshufhw to put the high elements in order,
3479 // and update MaskVals with the new element order.
3480 if (BestHiQuad >= 0) {
Nate Begeman9008ca62009-04-27 18:41:29 +00003481 SmallVector<int, 8> MaskV;
Nate Begemanb9a47b82009-02-23 08:49:38 +00003482 for (unsigned i = 0; i != 4; ++i)
Nate Begeman9008ca62009-04-27 18:41:29 +00003483 MaskV.push_back(i);
Nate Begemanb9a47b82009-02-23 08:49:38 +00003484 for (unsigned i = 4; i != 8; ++i) {
3485 int idx = MaskVals[i];
3486 if (idx < 0) {
Nate Begeman9008ca62009-04-27 18:41:29 +00003487 MaskV.push_back(-1);
Nate Begemanb9a47b82009-02-23 08:49:38 +00003488 InOrder.set(i);
3489 } else if ((idx / 4) == BestHiQuad) {
Nate Begeman9008ca62009-04-27 18:41:29 +00003490 MaskV.push_back((idx & 3) + 4);
Nate Begemanb9a47b82009-02-23 08:49:38 +00003491 InOrder.set(i);
3492 } else {
Nate Begeman9008ca62009-04-27 18:41:29 +00003493 MaskV.push_back(-1);
Nate Begemanb9a47b82009-02-23 08:49:38 +00003494 }
3495 }
Nate Begeman9008ca62009-04-27 18:41:29 +00003496 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
3497 &MaskV[0]);
Nate Begemanb9a47b82009-02-23 08:49:38 +00003498 }
3499
3500 // In case BestHiQuad & BestLoQuad were both -1, which means each quadword has a word
3501 // from each of the four input quadwords, calculate the InOrder bitvector now
3502 // before falling through to the insert/extract cleanup.
3503 if (BestLoQuad == -1 && BestHiQuad == -1) {
3504 NewV = V1;
3505 for (int i = 0; i != 8; ++i)
3506 if (MaskVals[i] < 0 || MaskVals[i] == i)
3507 InOrder.set(i);
3508 }
3509
3510 // The other elements are put in the right place using pextrw and pinsrw.
3511 for (unsigned i = 0; i != 8; ++i) {
3512 if (InOrder[i])
3513 continue;
3514 int EltIdx = MaskVals[i];
3515 if (EltIdx < 0)
3516 continue;
3517 SDValue ExtOp = (EltIdx < 8)
3518 ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
3519 DAG.getIntPtrConstant(EltIdx))
3520 : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
3521 DAG.getIntPtrConstant(EltIdx - 8));
3522 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
3523 DAG.getIntPtrConstant(i));
3524 }
3525 return NewV;
3526}
3527
3528// v16i8 shuffles - Prefer shuffles in the following order:
3529// 1. [ssse3] 1 x pshufb
3530// 2. [ssse3] 2 x pshufb + 1 x por
3531// 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
3532static
Nate Begeman9008ca62009-04-27 18:41:29 +00003533SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
3534 SelectionDAG &DAG, X86TargetLowering &TLI) {
3535 SDValue V1 = SVOp->getOperand(0);
3536 SDValue V2 = SVOp->getOperand(1);
3537 DebugLoc dl = SVOp->getDebugLoc();
Nate Begemanb9a47b82009-02-23 08:49:38 +00003538 SmallVector<int, 16> MaskVals;
Nate Begeman9008ca62009-04-27 18:41:29 +00003539 SVOp->getMask(MaskVals);
Nate Begemanb9a47b82009-02-23 08:49:38 +00003540
3541 // If we have SSSE3, case 1 is generated when all result bytes come from
3542 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
3543 // present, fall back to case 3.
3544 // FIXME: kill V2Only once shuffles are canonicalized by getNode.
3545 bool V1Only = true;
3546 bool V2Only = true;
3547 for (unsigned i = 0; i < 16; ++i) {
Nate Begeman9008ca62009-04-27 18:41:29 +00003548 int EltIdx = MaskVals[i];
Nate Begemanb9a47b82009-02-23 08:49:38 +00003549 if (EltIdx < 0)
3550 continue;
3551 if (EltIdx < 16)
3552 V2Only = false;
3553 else
3554 V1Only = false;
3555 }
3556
3557 // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
3558 if (TLI.getSubtarget()->hasSSSE3()) {
3559 SmallVector<SDValue,16> pshufbMask;
3560
3561 // If all result elements are from one input vector, then only translate
3562 // undef mask values to 0x80 (zero out result) in the pshufb mask.
3563 //
3564 // Otherwise, we have elements from both input vectors, and must zero out
3565 // elements that come from V2 in the first mask, and V1 in the second mask
3566 // so that we can OR them together.
3567 bool TwoInputs = !(V1Only || V2Only);
3568 for (unsigned i = 0; i != 16; ++i) {
3569 int EltIdx = MaskVals[i];
3570 if (EltIdx < 0 || (TwoInputs && EltIdx >= 16)) {
3571 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
3572 continue;
3573 }
3574 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
3575 }
3576 // If all the elements are from V2, assign it to V1 and return after
3577 // building the first pshufb.
3578 if (V2Only)
3579 V1 = V2;
3580 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
Evan Chenga87008d2009-02-25 22:49:59 +00003581 DAG.getNode(ISD::BUILD_VECTOR, dl,
3582 MVT::v16i8, &pshufbMask[0], 16));
Nate Begemanb9a47b82009-02-23 08:49:38 +00003583 if (!TwoInputs)
3584 return V1;
3585
3586 // Calculate the shuffle mask for the second input, shuffle it, and
3587 // OR it with the first shuffled input.
3588 pshufbMask.clear();
3589 for (unsigned i = 0; i != 16; ++i) {
3590 int EltIdx = MaskVals[i];
3591 if (EltIdx < 16) {
3592 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
3593 continue;
3594 }
3595 pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8));
3596 }
3597 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
Evan Chenga87008d2009-02-25 22:49:59 +00003598 DAG.getNode(ISD::BUILD_VECTOR, dl,
3599 MVT::v16i8, &pshufbMask[0], 16));
Nate Begemanb9a47b82009-02-23 08:49:38 +00003600 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
3601 }
3602
3603 // No SSSE3 - Calculate in-place words and then fix all out-of-place words
3604 // with 0-16 extracts & inserts. Worst case is 16 bytes out of order from
3605 // the 16 different words that comprise the two doublequadword input vectors.
3606 V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1);
3607 V2 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V2);
3608 SDValue NewV = V2Only ? V2 : V1;
3609 for (int i = 0; i != 8; ++i) {
3610 int Elt0 = MaskVals[i*2];
3611 int Elt1 = MaskVals[i*2+1];
3612
3613 // This word of the result is all undef, skip it.
3614 if (Elt0 < 0 && Elt1 < 0)
3615 continue;
3616
3617 // This word of the result is already in the correct place, skip it.
3618 if (V1Only && (Elt0 == i*2) && (Elt1 == i*2+1))
3619 continue;
3620 if (V2Only && (Elt0 == i*2+16) && (Elt1 == i*2+17))
3621 continue;
3622
3623 SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
3624 SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
3625 SDValue InsElt;
Mon P Wang6b3ef692009-03-11 18:47:57 +00003626
3627 // If Elt0 and Elt1 are defined, are consecutive, and can be load
3628 // using a single extract together, load it and store it.
3629 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
3630 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
3631 DAG.getIntPtrConstant(Elt1 / 2));
3632 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
3633 DAG.getIntPtrConstant(i));
3634 continue;
3635 }
3636
Nate Begemanb9a47b82009-02-23 08:49:38 +00003637 // If Elt1 is defined, extract it from the appropriate source. If the
Mon P Wang6b3ef692009-03-11 18:47:57 +00003638 // source byte is not also odd, shift the extracted word left 8 bits;
3639 // otherwise, clear the bottom 8 bits if we need to do an OR.
Nate Begemanb9a47b82009-02-23 08:49:38 +00003640 if (Elt1 >= 0) {
3641 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
3642 DAG.getIntPtrConstant(Elt1 / 2));
3643 if ((Elt1 & 1) == 0)
3644 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
3645 DAG.getConstant(8, TLI.getShiftAmountTy()));
Mon P Wang6b3ef692009-03-11 18:47:57 +00003646 else if (Elt0 >= 0)
3647 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
3648 DAG.getConstant(0xFF00, MVT::i16));
Nate Begemanb9a47b82009-02-23 08:49:38 +00003649 }
3650 // If Elt0 is defined, extract it from the appropriate source. If the
3651 // source byte is not also even, shift the extracted word right 8 bits. If
3652 // Elt1 was also defined, OR the extracted values together before
3653 // inserting them in the result.
3654 if (Elt0 >= 0) {
3655 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
3656 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
3657 if ((Elt0 & 1) != 0)
3658 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
3659 DAG.getConstant(8, TLI.getShiftAmountTy()));
Mon P Wang6b3ef692009-03-11 18:47:57 +00003660 else if (Elt1 >= 0)
3661 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
3662 DAG.getConstant(0x00FF, MVT::i16));
Nate Begemanb9a47b82009-02-23 08:49:38 +00003663 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
3664 : InsElt0;
3665 }
3666 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
3667 DAG.getIntPtrConstant(i));
3668 }
3669 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, NewV);
Evan Cheng14b32e12007-12-11 01:46:18 +00003670}
3671
Evan Cheng7a831ce2007-12-15 03:00:47 +00003672/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
3673/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
3674/// done when every pair / quad of shuffle mask elements point to elements in
3675/// the right sequence. e.g.
Evan Cheng14b32e12007-12-11 01:46:18 +00003676/// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15>
3677static
Nate Begeman9008ca62009-04-27 18:41:29 +00003678SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
3679 SelectionDAG &DAG,
3680 TargetLowering &TLI, DebugLoc dl) {
3681 MVT VT = SVOp->getValueType(0);
3682 SDValue V1 = SVOp->getOperand(0);
3683 SDValue V2 = SVOp->getOperand(1);
3684 unsigned NumElems = VT.getVectorNumElements();
Evan Cheng7a831ce2007-12-15 03:00:47 +00003685 unsigned NewWidth = (NumElems == 4) ? 2 : 4;
Duncan Sands83ec4b62008-06-06 12:08:01 +00003686 MVT MaskVT = MVT::getIntVectorWithNumElements(NewWidth);
Duncan Sandsd038e042008-07-21 10:20:31 +00003687 MVT MaskEltVT = MaskVT.getVectorElementType();
Duncan Sands83ec4b62008-06-06 12:08:01 +00003688 MVT NewVT = MaskVT;
3689 switch (VT.getSimpleVT()) {
3690 default: assert(false && "Unexpected!");
Evan Cheng7a831ce2007-12-15 03:00:47 +00003691 case MVT::v4f32: NewVT = MVT::v2f64; break;
3692 case MVT::v4i32: NewVT = MVT::v2i64; break;
3693 case MVT::v8i16: NewVT = MVT::v4i32; break;
3694 case MVT::v16i8: NewVT = MVT::v4i32; break;
Evan Cheng7a831ce2007-12-15 03:00:47 +00003695 }
3696
Anton Korobeynikov7c1c2612008-02-20 11:22:39 +00003697 if (NewWidth == 2) {
Duncan Sands83ec4b62008-06-06 12:08:01 +00003698 if (VT.isInteger())
Evan Cheng7a831ce2007-12-15 03:00:47 +00003699 NewVT = MVT::v2i64;
3700 else
3701 NewVT = MVT::v2f64;
Anton Korobeynikov7c1c2612008-02-20 11:22:39 +00003702 }
Nate Begeman9008ca62009-04-27 18:41:29 +00003703 int Scale = NumElems / NewWidth;
3704 SmallVector<int, 8> MaskVec;
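  // Each group of Scale adjacent mask elements must either be all undef or
  // reference Scale consecutive, Scale-aligned elements of one source vector;
  // only then can the group be collapsed into a single wide-element index.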
  for (unsigned i = 0; i < NumElems; i += Scale) {
    int StartIdx = -1;
    for (int j = 0; j < Scale; ++j) {
      int EltIdx = SVOp->getMaskElt(i+j);
      if (EltIdx < 0)
        continue;
      if (StartIdx == -1)
        StartIdx = EltIdx - (EltIdx % Scale);
      if (EltIdx != StartIdx + j)
        return SDValue();
    }
    if (StartIdx == -1)
      MaskVec.push_back(-1);
    else
      MaskVec.push_back(StartIdx / Scale);
  }

  V1 = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, V1);
  V2 = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, V2);
  return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
}

/// getVZextMovL - Return a zero-extending vector move low node.
///
static SDValue getVZextMovL(MVT VT, MVT OpVT,
                            SDValue SrcOp, SelectionDAG &DAG,
                            const X86Subtarget *Subtarget, DebugLoc dl) {
  if (VT == MVT::v2f64 || VT == MVT::v4f32) {
    LoadSDNode *LD = NULL;
    if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
      LD = dyn_cast<LoadSDNode>(SrcOp);
    if (!LD) {
      // movssrr and movsdrr do not clear top bits. Try to use movd, movq
      // instead.
      MVT EVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
      if ((EVT != MVT::i64 || Subtarget->is64Bit()) &&
          SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
          SrcOp.getOperand(0).getOpcode() == ISD::BIT_CONVERT &&
          SrcOp.getOperand(0).getOperand(0).getValueType() == EVT) {
        // PR2108
        OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
        return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
                           DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   OpVT,
                                                   SrcOp.getOperand(0)
                                                        .getOperand(0))));
      }
    }
  }

  return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
                     DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                 DAG.getNode(ISD::BIT_CONVERT, dl,
                                             OpVT, SrcOp)));
}

/// LowerVECTOR_SHUFFLE_4wide - Handle all 4 wide cases with a number of
/// shuffles.
static SDValue
LowerVECTOR_SHUFFLE_4wide(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  DebugLoc dl = SVOp->getDebugLoc();
  MVT VT = SVOp->getValueType(0);

  SmallVector<std::pair<int, int>, 8> Locs;
  Locs.resize(4);
  SmallVector<int, 8> Mask1(4U, -1);
  SmallVector<int, 8> PermMask;
  SVOp->getMask(PermMask);

  unsigned NumHi = 0;
  unsigned NumLo = 0;
  for (unsigned i = 0; i != 4; ++i) {
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else {
      assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
      if (Idx < 4) {
        Locs[i] = std::make_pair(0, NumLo);
        Mask1[NumLo] = Idx;
        NumLo++;
      } else {
        Locs[i] = std::make_pair(1, NumHi);
        if (2+NumHi < 4)
          Mask1[2+NumHi] = Idx;
        NumHi++;
      }
    }
  }

  if (NumLo <= 2 && NumHi <= 2) {
    // If no more than two elements come from either vector, this can be
    // implemented with two shuffles. The first shuffle gathers the elements.
    // The second shuffle, which takes the first shuffle as both of its
    // vector operands, puts the elements into the right order.
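    // For example, for the mask <0,4,1,5>, Mask1 gathers <0,1,4,5> and the
    // second shuffle applies <0,2,5,7> to that result to restore the order.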
    V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    SmallVector<int, 8> Mask2(4U, -1);

    for (unsigned i = 0; i != 4; ++i) {
      if (Locs[i].first == -1)
        continue;
      else {
        unsigned Idx = (i < 2) ? 0 : 4;
        Idx += Locs[i].first * 2 + Locs[i].second;
        Mask2[i] = Idx;
      }
    }

    return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
  } else if (NumLo == 3 || NumHi == 3) {
    // Otherwise, we must have three elements from one vector, call it X, and
    // one element from the other, call it Y.  First, use a shufps to build an
    // intermediate vector with the one element from Y and the element from X
    // that will be in the same half in the final destination (the indexes
    // don't matter).  Then, use a shufps to build the final vector, taking
    // the half containing the element from Y from the intermediate, and the
    // other half from X.
    if (NumHi == 3) {
      // Normalize it so the 3 elements come from V1.
      CommuteVectorShuffleMask(PermMask, VT);
      std::swap(V1, V2);
    }

    // Find the element from V2.
    unsigned HiIndex;
    for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
      int Val = PermMask[HiIndex];
      if (Val < 0)
        continue;
      if (Val >= 4)
        break;
    }

    Mask1[0] = PermMask[HiIndex];
    Mask1[1] = -1;
    Mask1[2] = PermMask[HiIndex^1];
    Mask1[3] = -1;
    V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    if (HiIndex >= 2) {
      Mask1[0] = PermMask[0];
      Mask1[1] = PermMask[1];
      Mask1[2] = HiIndex & 1 ? 6 : 4;
      Mask1[3] = HiIndex & 1 ? 4 : 6;
      return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
    } else {
      Mask1[0] = HiIndex & 1 ? 2 : 0;
      Mask1[1] = HiIndex & 1 ? 0 : 2;
      Mask1[2] = PermMask[2];
      Mask1[3] = PermMask[3];
      if (Mask1[2] >= 0)
        Mask1[2] += 4;
      if (Mask1[3] >= 0)
        Mask1[3] += 4;
      return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
    }
  }

  // Break it into (shuffle shuffle_hi, shuffle_lo).
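  // The general case: gather the elements destined for the low half of the
  // result into LoMask and those destined for the high half into HiMask,
  // build one shuffle for each, and then combine the two partial results
  // with a final shuffle (MaskOps) that routes every element into place.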
  Locs.clear();
  SmallVector<int,8> LoMask(4U, -1);
  SmallVector<int,8> HiMask(4U, -1);

  SmallVector<int,8> *MaskPtr = &LoMask;
  unsigned MaskIdx = 0;
  unsigned LoIdx = 0;
  unsigned HiIdx = 2;
  for (unsigned i = 0; i != 4; ++i) {
    if (i == 2) {
      MaskPtr = &HiMask;
      MaskIdx = 1;
      LoIdx = 0;
      HiIdx = 2;
    }
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else if (Idx < 4) {
      Locs[i] = std::make_pair(MaskIdx, LoIdx);
      (*MaskPtr)[LoIdx] = Idx;
      LoIdx++;
    } else {
      Locs[i] = std::make_pair(MaskIdx, HiIdx);
      (*MaskPtr)[HiIdx] = Idx;
      HiIdx++;
    }
  }

  SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
  SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
  SmallVector<int, 8> MaskOps;
  for (unsigned i = 0; i != 4; ++i) {
    if (Locs[i].first == -1) {
      MaskOps.push_back(-1);
    } else {
      unsigned Idx = Locs[i].first * 4 + Locs[i].second;
      MaskOps.push_back(Idx);
    }
  }
  return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
}

SDValue
X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  unsigned NumElems = VT.getVectorNumElements();
  bool isMMX = VT.getSizeInBits() == 64;
  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsSplat = false;
  bool V2IsSplat = false;

  if (isZeroShuffle(SVOp))
    return getZeroVector(VT, Subtarget->hasSSE2(), DAG, dl);

  // Promote splats to v4f32.
  if (SVOp->isSplat()) {
    if (isMMX || NumElems < 4)
      return Op;
    return PromoteSplat(SVOp, DAG, Subtarget->hasSSE2());
  }

  // If the shuffle can be profitably rewritten as a narrower shuffle, then
  // do it!
  if (VT == MVT::v8i16 || VT == MVT::v16i8) {
    SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, *this, dl);
    if (NewOp.getNode())
      return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
                         LowerVECTOR_SHUFFLE(NewOp, DAG));
  } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
    // FIXME: Figure out a cleaner way to do this.
    // Try to make use of movq to zero out the top part.
    if (ISD::isBuildVectorAllZeros(V2.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, *this, dl);
      if (NewOp.getNode()) {
        if (isCommutedMOVL(cast<ShuffleVectorSDNode>(NewOp), true, false))
          return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(0),
                              DAG, Subtarget, dl);
      }
    } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, *this, dl);
      if (NewOp.getNode() && X86::isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)))
        return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(1),
                            DAG, Subtarget, dl);
    }
  }

  if (X86::isPSHUFDMask(SVOp))
    return Op;

  // Check if this can be converted into a logical shift.
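  // Roughly: a shuffle that keeps a contiguous run of elements and fills the
  // rest with zeros taken from an all-zero operand is equivalent to shifting
  // the whole 128-bit register by a byte amount (pslldq/psrldq).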
  bool isLeft = false;
  unsigned ShAmt = 0;
  SDValue ShVal;
  bool isShift = getSubtarget()->hasSSE2() &&
    isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
  if (isShift && ShVal.hasOneUse()) {
    // If the shifted value has multiple uses, it may be cheaper to use
    // v_set0 + movlhps or movhlps, etc.
    MVT EVT = VT.getVectorElementType();
    ShAmt *= EVT.getSizeInBits();
    return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
  }

  if (X86::isMOVLMask(SVOp)) {
    if (V1IsUndef)
      return V2;
    if (ISD::isBuildVectorAllZeros(V1.getNode()))
      return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
    if (!isMMX)
      return Op;
  }

  // FIXME: fold these into legal mask.
  if (!isMMX && (X86::isMOVSHDUPMask(SVOp) ||
                 X86::isMOVSLDUPMask(SVOp) ||
                 X86::isMOVHLPSMask(SVOp) ||
                 X86::isMOVHPMask(SVOp) ||
                 X86::isMOVLPMask(SVOp)))
    return Op;

  if (ShouldXformToMOVHLPS(SVOp) ||
      ShouldXformToMOVLP(V1.getNode(), V2.getNode(), SVOp))
    return CommuteVectorShuffle(SVOp, DAG);

  if (isShift) {
    // No better options. Use a vshl / vsrl.
    MVT EVT = VT.getVectorElementType();
    ShAmt *= EVT.getSizeInBits();
    return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
  }

  bool Commuted = false;
  // FIXME: This should also accept a bitcast of a splat?  Be careful, not
  // 1,1,1,1 -> v8i16 though.
  V1IsSplat = isSplatVector(V1.getNode());
  V2IsSplat = isSplatVector(V2.getNode());

  // Canonicalize the splat or undef, if present, to be on the RHS.
  if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
    Op = CommuteVectorShuffle(SVOp, DAG);
    SVOp = cast<ShuffleVectorSDNode>(Op);
    V1 = SVOp->getOperand(0);
    V2 = SVOp->getOperand(1);
    std::swap(V1IsSplat, V2IsSplat);
    std::swap(V1IsUndef, V2IsUndef);
    Commuted = true;
  }

  if (isCommutedMOVL(SVOp, V2IsSplat, V2IsUndef)) {
    // Shuffling low element of v1 into undef, just return v1.
    if (V2IsUndef)
      return V1;
    // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
    // the instruction selector will not match, so get a canonical MOVL with
    // swapped operands to undo the commute.
    return getMOVL(DAG, dl, VT, V2, V1);
  }

  if (X86::isUNPCKL_v_undef_Mask(SVOp) ||
      X86::isUNPCKH_v_undef_Mask(SVOp) ||
      X86::isUNPCKLMask(SVOp) ||
      X86::isUNPCKHMask(SVOp))
    return Op;

  if (V2IsSplat) {
    // Normalize the mask so all entries that point to V2 point to its first
    // element, then try to match unpck{h|l} again. If it matches, return a
    // new vector_shuffle with the corrected mask.
    SDValue NewMask = NormalizeMask(SVOp, DAG);
    ShuffleVectorSDNode *NSVOp = cast<ShuffleVectorSDNode>(NewMask);
    if (NSVOp != SVOp) {
      if (X86::isUNPCKLMask(NSVOp, true)) {
        return NewMask;
      } else if (X86::isUNPCKHMask(NSVOp, true)) {
        return NewMask;
      }
    }
  }

  if (Commuted) {
    // Commute it back and try unpck* again.
    // FIXME: this seems wrong.
    SDValue NewOp = CommuteVectorShuffle(SVOp, DAG);
    ShuffleVectorSDNode *NewSVOp = cast<ShuffleVectorSDNode>(NewOp);
    if (X86::isUNPCKL_v_undef_Mask(NewSVOp) ||
        X86::isUNPCKH_v_undef_Mask(NewSVOp) ||
        X86::isUNPCKLMask(NewSVOp) ||
        X86::isUNPCKHMask(NewSVOp))
      return NewOp;
  }

  // FIXME: for mmx, bitcast v2i32 to v4i16 for shuffle.

  // Normalize the node to match x86 shuffle ops if needed
  if (!isMMX && V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(SVOp))
    return CommuteVectorShuffle(SVOp, DAG);

  // If the shuffle mask is already legal as-is, just return Op.
  SmallVector<int, 16> PermMask;
  SVOp->getMask(PermMask);
  if (isShuffleMaskLegal(PermMask, VT))
    return Op;

  // Handle v8i16 specifically since SSE can do byte extraction and insertion.
  if (VT == MVT::v8i16) {
    SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(SVOp, DAG, *this);
    if (NewOp.getNode())
      return NewOp;
  }

  if (VT == MVT::v16i8) {
    SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, DAG, *this);
    if (NewOp.getNode())
      return NewOp;
  }

  // Handle all 4 wide cases with a number of shuffles except for MMX.
  if (NumElems == 4 && !isMMX)
    return LowerVECTOR_SHUFFLE_4wide(SVOp, DAG);

  return SDValue();
}

SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
                                                SelectionDAG &DAG) {
  MVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  if (VT.getSizeInBits() == 8) {
    SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert  = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
                                  DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  } else if (VT.getSizeInBits() == 16) {
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    // If Idx is 0, it's cheaper to do a move instead of a pextrw.
    if (Idx == 0)
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getNode(ISD::BIT_CONVERT, dl,
                                                 MVT::v4i32,
                                                 Op.getOperand(0)),
                                     Op.getOperand(1)));
    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert  = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
                                  DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  } else if (VT == MVT::f32) {
    // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
    // the result back to an FR32 register. It's only worth matching if the
    // result has a single use which is a store or a bitcast to i32.  And in
    // the case of a store, it's not worth it if the index is a constant 0,
    // because a MOVSSmr can be used instead, which is smaller and faster.
    if (!Op.hasOneUse())
      return SDValue();
    SDNode *User = *Op.getNode()->use_begin();
    if ((User->getOpcode() != ISD::STORE ||
         (isa<ConstantSDNode>(Op.getOperand(1)) &&
          cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
        (User->getOpcode() != ISD::BIT_CONVERT ||
         User->getValueType(0) != MVT::i32))
      return SDValue();
    SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32,
                                              Op.getOperand(0)),
                                  Op.getOperand(1));
    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Extract);
  } else if (VT == MVT::i32) {
    // ExtractPS works with constant index.
    if (isa<ConstantSDNode>(Op.getOperand(1)))
      return Op;
  }
  return SDValue();
}


SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
  if (!isa<ConstantSDNode>(Op.getOperand(1)))
    return SDValue();

  if (Subtarget->hasSSE41()) {
    SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
    if (Res.getNode())
      return Res;
  }

  MVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  // TODO: handle v16i8.
  if (VT.getSizeInBits() == 16) {
    SDValue Vec = Op.getOperand(0);
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getNode(ISD::BIT_CONVERT, dl,
                                                 MVT::v4i32, Vec),
                                     Op.getOperand(1)));
    // Transform it so it matches pextrw, which produces a 32-bit result.
    MVT EVT = (MVT::SimpleValueType)(VT.getSimpleVT()+1);
    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EVT,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert  = DAG.getNode(ISD::AssertZext, dl, EVT, Extract,
                                  DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  } else if (VT.getSizeInBits() == 32) {
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return Op;

    // SHUFPS the element to the lowest double word, then movss.
    int Mask[4] = { Idx, -1, -1, -1 };
    MVT VVT = Op.getOperand(0).getValueType();
    SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
                                       DAG.getUNDEF(VVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0));
  } else if (VT.getSizeInBits() == 64) {
    // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
    // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
    //        to match extract_elt for f64.
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return Op;

    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note if the lower 64 bits of the result of the UNPCKHPD are then stored
    // to a f64mem, the whole operation is folded into a single MOVHPDmr.
    int Mask[2] = { 1, -1 };
    MVT VVT = Op.getOperand(0).getValueType();
    SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
                                       DAG.getUNDEF(VVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0));
  }

  return SDValue();
}

SDValue
X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG){
  MVT VT = Op.getValueType();
  MVT EVT = VT.getVectorElementType();
  DebugLoc dl = Op.getDebugLoc();

  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2 = Op.getOperand(2);

  if ((EVT.getSizeInBits() == 8 || EVT.getSizeInBits() == 16) &&
      isa<ConstantSDNode>(N2)) {
    unsigned Opc = (EVT.getSizeInBits() == 8) ? X86ISD::PINSRB
                                              : X86ISD::PINSRW;
    // Transform it so it matches pinsr{b,w} which expects a GR32 as its second
    // argument.
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
    return DAG.getNode(Opc, dl, VT, N0, N1, N2);
  } else if (EVT == MVT::f32 && isa<ConstantSDNode>(N2)) {
    // Bits [7:6] of the constant are the source select.  This will always be
    //  zero here.  The DAG Combiner may combine an extract_elt index into these
    //  bits.  For example (insert (extract, 3), 2) could be matched by putting
    //  the '3' into bits [7:6] of X86ISD::INSERTPS.
    // Bits [5:4] of the constant are the destination select.  This is the
    //  value of the incoming immediate.
    // Bits [3:0] of the constant are the zero mask.  The DAG Combiner may
    //   combine either bitwise AND or insert of float 0.0 to set these bits.
    N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4);
    return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
  } else if (EVT == MVT::i32) {
    // InsertPS works with constant index.
    if (isa<ConstantSDNode>(N2))
      return Op;
  }
  return SDValue();
}

SDValue
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getValueType();
  MVT EVT = VT.getVectorElementType();

  if (Subtarget->hasSSE41())
    return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);

  if (EVT == MVT::i8)
    return SDValue();

  DebugLoc dl = Op.getDebugLoc();
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2 = Op.getOperand(2);

  if (EVT.getSizeInBits() == 16 && isa<ConstantSDNode>(N2)) {
    // Transform it so it matches pinsrw which expects a 16-bit value in a GR32
    // as its second argument.
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
    return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
  }
  return SDValue();
}

SDValue
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  if (Op.getValueType() == MVT::v2f32)
    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f32,
                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i32,
                                   DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32,
                                               Op.getOperand(0))));

  SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
  MVT VT = MVT::v2i32;
  switch (Op.getValueType().getSimpleVT()) {
  default: break;
  case MVT::v16i8:
  case MVT::v8i16:
    VT = MVT::v4i32;
    break;
  }
  return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(),
                     DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, AnyExt));
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form an addressing mode. These wrapped nodes will be selected
// into MOV32ri.
SDValue
X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
    if (Subtarget->isPICStyleStub())
      OpFlag = X86II::MO_PIC_BASE_OFFSET;
    else if (Subtarget->isPICStyleGOT())
      OpFlag = X86II::MO_GOTOFF;
  }

  SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
                                             CP->getAlignment(),
                                             CP->getOffset(), OpFlag);
  DebugLoc DL = CP->getDebugLoc();
  Result = DAG.getNode(X86ISD::Wrapper, DL, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (OpFlag) {
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     DebugLoc::getUnknownLoc(), getPointerTy()),
                         Result);
  }

  return Result;
}

SDValue
X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
                                      int64_t Offset,
                                      SelectionDAG &DAG) const {
  bool IsPic = getTargetMachine().getRelocationModel() == Reloc::PIC_;
  bool ExtraLoadRequired =
    Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false);

  // Create the TargetGlobalAddress node, folding in the constant
  // offset if it is legal.
  SDValue Result;
  if (!IsPic && !ExtraLoadRequired && isInt32(Offset)) {
    Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), Offset);
    Offset = 0;
  } else
    Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), 0);
  Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (IsPic && !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
                         Result);
  }

  // For Darwin & Mingw32, external and weak symbols are indirect, so we want
  // to load the value at address GV, not the value of GV itself. This means
  // that the GlobalAddress must be in the base or index register of the
  // address, not the GV offset field. The platform check is inside the
  // GVRequiresExtraLoad() call. The same applies to external symbols during
  // PIC codegen.
  if (ExtraLoadRequired)
    Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
                         PseudoSourceValue::getGOT(), 0);

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
                         DAG.getConstant(Offset, getPointerTy()));

  return Result;
}

SDValue
X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) {
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
  return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG);
}

static SDValue
GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
           SDValue *InFlag, const MVT PtrVT, unsigned ReturnReg,
           unsigned char OperandFlags) {
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  DebugLoc dl = GA->getDebugLoc();
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
                                           GA->getValueType(0),
                                           GA->getOffset(),
                                           OperandFlags);
  if (InFlag) {
    SDValue Ops[] = { Chain, TGA, *InFlag };
    Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 3);
  } else {
    SDValue Ops[] = { Chain, TGA };
    Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 2);
  }
  SDValue Flag = Chain.getValue(1);
  return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
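// The 32-bit sequence materializes the GOT base in EBX, issues the TLSADDR
// pseudo with a TLSGD-relocated symbol, and reads the variable's address
// back from EAX.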
static SDValue
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const MVT PtrVT) {
  SDValue InFlag;
  DebugLoc dl = GA->getDebugLoc();  // ? function entry point might be better
  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
                                   DAG.getNode(X86ISD::GlobalBaseReg,
                                               DebugLoc::getUnknownLoc(),
                                               PtrVT), InFlag);
  InFlag = Chain.getValue(1);

  return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const MVT PtrVT) {
  return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT,
                    X86::RAX, X86II::MO_TLSGD);
}

// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
// "local exec" model.
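// Both models compute the address as the thread pointer (the %fs/%gs segment
// base) plus a per-variable offset; initial-exec loads that offset from the
// GOT, while local-exec folds it directly into the address computation.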
static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                   const MVT PtrVT, TLSModel::Model model,
                                   bool is64Bit) {
  DebugLoc dl = GA->getDebugLoc();
  // Get the Thread Pointer
  SDValue Base = DAG.getNode(X86ISD::SegmentBaseAddress,
                             DebugLoc::getUnknownLoc(), PtrVT,
                             DAG.getRegister(is64Bit? X86::FS : X86::GS,
                                             MVT::i32));

  SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Base,
                                      NULL, 0);

  unsigned char OperandFlags = 0;
  if (model == TLSModel::InitialExec) {
    OperandFlags = is64Bit ? X86II::MO_GOTTPOFF : X86II::MO_INDNTPOFF;
  } else {
    assert(model == TLSModel::LocalExec);
    OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
  }

  // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial
  // exec)
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
                                           GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);

  if (model == TLSModel::InitialExec)
    Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
                         PseudoSourceValue::getGOT(), 0);

  // The address of the thread-local variable is the thread pointer plus the
  // offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}

SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) {
  // TODO: implement the "local dynamic" model
  // TODO: implement the "initial exec" model for pic executables
  assert(Subtarget->isTargetELF() &&
         "TLS not implemented for non-ELF targets");
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GA->getGlobal();

  // If GV is an alias then use the aliasee for determining
  // thread-localness.
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
    GV = GA->resolveAliasedGlobal(false);

  TLSModel::Model model = getTLSModel(GV,
                                      getTargetMachine().getRelocationModel());

  switch (model) {
  case TLSModel::GeneralDynamic:
  case TLSModel::LocalDynamic: // not implemented
    if (Subtarget->is64Bit())
      return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
    return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());

  case TLSModel::InitialExec:
  case TLSModel::LocalExec:
    return LowerToTLSExecModel(GA, DAG, getPointerTy(), model,
                               Subtarget->is64Bit());
  }

  assert(0 && "Unreachable");
  return SDValue();
}

SDValue
X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) {
  // FIXME there isn't really any debug info here
  DebugLoc dl = Op.getDebugLoc();
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
  SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     DebugLoc::getUnknownLoc(),
                                     getPointerTy()),
                         Result);
  }

  return Result;
}

SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
    if (Subtarget->isPICStyleStub())
      OpFlag = X86II::MO_PIC_BASE_OFFSET;
    else if (Subtarget->isPICStyleGOT())
      OpFlag = X86II::MO_GOTOFF;
  }

  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
                                          OpFlag);
  DebugLoc DL = JT->getDebugLoc();
  Result = DAG.getNode(X86ISD::Wrapper, DL, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (OpFlag) {
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     DebugLoc::getUnknownLoc(), getPointerTy()),
                         Result);
  }

  return Result;
}

/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and
/// take a 2 x i32 value to shift plus a shift amount.
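/// The expansion uses shld/shrd plus cmovs keyed on the VTBits bit of the
/// shift amount: when that bit is set, the halves are swapped and the vacated
/// half is zero- or sign-filled.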
SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  MVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  DebugLoc dl = Op.getDebugLoc();
  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt  = Op.getOperand(2);
  SDValue Tmp1 = isSRA ?
    DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
                DAG.getConstant(VTBits - 1, MVT::i8)) :
    DAG.getConstant(0, VT);

  SDValue Tmp2, Tmp3;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
  } else {
    Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
    Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, ShAmt);
  }

  SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
                                DAG.getConstant(VTBits, MVT::i8));
  SDValue Cond = DAG.getNode(X86ISD::CMP, dl, VT,
                             AndNode, DAG.getConstant(0, MVT::i8));

  SDValue Hi, Lo;
  SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
  SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
  SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };

  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4);
    Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4);
  } else {
    Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4);
    Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4);
  }

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
  MVT SrcVT = Op.getOperand(0).getValueType();

  if (SrcVT.isVector()) {
    if (SrcVT == MVT::v2i32 && Op.getValueType() == MVT::v2f64) {
      return Op;
    }
    return SDValue();
  }

  assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 &&
         "Unknown SINT_TO_FP to lower!");

  // These are really Legal; return the operand so the caller accepts it as
  // Legal.
  if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
    return Op;
  if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
      Subtarget->is64Bit()) {
    return Op;
  }

  DebugLoc dl = Op.getDebugLoc();
  unsigned Size = SrcVT.getSizeInBits()/8;
  MachineFunction &MF = DAG.getMachineFunction();
  int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                               StackSlot,
                               PseudoSourceValue::getFixedStack(SSFI), 0);
  return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
}

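// BuildFILD - Load the integer in StackSlot with an x87 fild. When the final
// result lives in an SSE register, the x87 value is additionally spilled with
// an fst and reloaded, since x87 and SSE values live in different register
// files.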
SDValue X86TargetLowering::BuildFILD(SDValue Op, MVT SrcVT, SDValue Chain,
                                     SDValue StackSlot,
                                     SelectionDAG &DAG) {
  // Build the FILD
  DebugLoc dl = Op.getDebugLoc();
  SDVTList Tys;
  bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
  if (useSSE)
    Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
  else
    Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(StackSlot);
  Ops.push_back(DAG.getValueType(SrcVT));
  SDValue Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, dl,
                               Tys, &Ops[0], Ops.size());

  if (useSSE) {
    Chain = Result.getValue(1);
    SDValue InFlag = Result.getValue(2);

    // FIXME: Currently the FST is flagged to the FILD_FLAG. This
    // shouldn't be necessary except that RFP cannot be live across
    // multiple blocks. When the stackifier is fixed, they can be uncoupled.
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00004663 MachineFunction &MF = DAG.getMachineFunction();
Evan Cheng0db9fe62006-04-25 20:13:52 +00004664 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
Dan Gohman475871a2008-07-27 21:46:04 +00004665 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
Chris Lattner5a88b832007-02-25 07:10:00 +00004666 Tys = DAG.getVTList(MVT::Other);
Dan Gohman475871a2008-07-27 21:46:04 +00004667 SmallVector<SDValue, 8> Ops;
Evan Chenga3195e82006-01-12 22:54:21 +00004668 Ops.push_back(Chain);
Evan Cheng0db9fe62006-04-25 20:13:52 +00004669 Ops.push_back(Result);
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00004670 Ops.push_back(StackSlot);
Evan Cheng0db9fe62006-04-25 20:13:52 +00004671 Ops.push_back(DAG.getValueType(Op.getValueType()));
4672 Ops.push_back(InFlag);
Dale Johannesenace16102009-02-03 19:33:06 +00004673 Chain = DAG.getNode(X86ISD::FST, dl, Tys, &Ops[0], Ops.size());
4674 Result = DAG.getLoad(Op.getValueType(), dl, Chain, StackSlot,
Dan Gohmana54cf172008-07-11 22:44:52 +00004675 PseudoSourceValue::getFixedStack(SSFI), 0);
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00004676 }
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00004677
Evan Cheng0db9fe62006-04-25 20:13:52 +00004678 return Result;
4679}
4680
Bill Wendling8b8a6362009-01-17 03:56:04 +00004681// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
4682SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) {
4683 // This algorithm is not obvious. Here it is in C code, more or less:
4684 /*
4685 double uint64_to_double( uint32_t hi, uint32_t lo ) {
4686 static const __m128i exp = { 0x4330000045300000ULL, 0 };
4687 static const __m128d bias = { 0x1.0p84, 0x1.0p52 };
Dale Johannesen040225f2008-10-21 23:07:49 +00004688
Bill Wendling8b8a6362009-01-17 03:56:04 +00004689 // Copy ints to xmm registers.
4690 __m128i xh = _mm_cvtsi32_si128( hi );
4691 __m128i xl = _mm_cvtsi32_si128( lo );
Dale Johannesen040225f2008-10-21 23:07:49 +00004692
Bill Wendling8b8a6362009-01-17 03:56:04 +00004693 // Combine into low half of a single xmm register.
4694 __m128i x = _mm_unpacklo_epi32( xh, xl );
4695 __m128d d;
4696 double sd;
Dale Johannesen040225f2008-10-21 23:07:49 +00004697
Bill Wendling8b8a6362009-01-17 03:56:04 +00004698 // Merge in appropriate exponents to give the integer bits the right
4699 // magnitude.
4700 x = _mm_unpacklo_epi32( x, exp );
Dale Johannesen040225f2008-10-21 23:07:49 +00004701
Bill Wendling8b8a6362009-01-17 03:56:04 +00004702 // Subtract away the biases to deal with the IEEE-754 double precision
4703 // implicit 1.
4704 d = _mm_sub_pd( (__m128d) x, bias );
Dale Johannesen040225f2008-10-21 23:07:49 +00004705
Bill Wendling8b8a6362009-01-17 03:56:04 +00004706 // All conversions up to here are exact. The correctly rounded result is
4707 // calculated using the current rounding mode using the following
4708 // horizontal add.
4709 d = _mm_add_sd( d, _mm_unpackhi_pd( d, d ) );
4710 _mm_store_sd( &sd, d ); // Because we are returning doubles in XMM, this
4711 // store doesn't really need to be here (except
4712 // maybe to zero the other double)
4713 return sd;
4714 }
4715 */
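                                            // As a worked example of the bias trick above: for hi=1, lo=7 the two
                                            // constructed doubles are 2^84 + 1*2^32 (bits 0x4530000000000001) and
                                            // 2^52 + 7 (bits 0x4330000000000007); subtracting the 2^84 and 2^52
                                            // biases leaves { 2^32, 7 }, and the horizontal add yields
                                            // 4294967303.0, the exact value of the original 64-bit integer.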
Dale Johannesen040225f2008-10-21 23:07:49 +00004716
Dale Johannesen6f38cb62009-02-07 19:59:05 +00004717 DebugLoc dl = Op.getDebugLoc();
Dale Johannesenace16102009-02-03 19:33:06 +00004718
Dale Johannesen1c15bf52008-10-21 20:50:01 +00004719 // Build some magic constants.
Bill Wendling8b8a6362009-01-17 03:56:04 +00004720 std::vector<Constant*> CV0;
Dale Johannesen1c15bf52008-10-21 20:50:01 +00004721 CV0.push_back(ConstantInt::get(APInt(32, 0x45300000)));
4722 CV0.push_back(ConstantInt::get(APInt(32, 0x43300000)));
4723 CV0.push_back(ConstantInt::get(APInt(32, 0)));
4724 CV0.push_back(ConstantInt::get(APInt(32, 0)));
4725 Constant *C0 = ConstantVector::get(CV0);
Evan Cheng1606e8e2009-03-13 07:51:59 +00004726 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
Dale Johannesen1c15bf52008-10-21 20:50:01 +00004727
Bill Wendling8b8a6362009-01-17 03:56:04 +00004728 std::vector<Constant*> CV1;
Dale Johannesen1c15bf52008-10-21 20:50:01 +00004729 CV1.push_back(ConstantFP::get(APFloat(APInt(64, 0x4530000000000000ULL))));
4730 CV1.push_back(ConstantFP::get(APFloat(APInt(64, 0x4330000000000000ULL))));
4731 Constant *C1 = ConstantVector::get(CV1);
Evan Cheng1606e8e2009-03-13 07:51:59 +00004732 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
Dale Johannesen1c15bf52008-10-21 20:50:01 +00004733
Dale Johannesenace16102009-02-03 19:33:06 +00004734 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
4735 DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
Duncan Sands6b6aeb32008-10-22 11:24:12 +00004736 Op.getOperand(0),
4737 DAG.getIntPtrConstant(1)));
Dale Johannesenace16102009-02-03 19:33:06 +00004738 SDValue XR2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
4739 DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
Duncan Sands6b6aeb32008-10-22 11:24:12 +00004740 Op.getOperand(0),
4741 DAG.getIntPtrConstant(0)));
Nate Begeman9008ca62009-04-27 18:41:29 +00004742 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32, XR1, XR2);
Dale Johannesenace16102009-02-03 19:33:06 +00004743 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
Bill Wendling8b8a6362009-01-17 03:56:04 +00004744 PseudoSourceValue::getConstantPool(), 0,
4745 false, 16);
Nate Begeman9008ca62009-04-27 18:41:29 +00004746 SDValue Unpck2 = getUnpackl(DAG, dl, MVT::v4i32, Unpck1, CLod0);
Dale Johannesenace16102009-02-03 19:33:06 +00004747 SDValue XR2F = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Unpck2);
4748 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
Bill Wendling8b8a6362009-01-17 03:56:04 +00004749 PseudoSourceValue::getConstantPool(), 0,
4750 false, 16);
Dale Johannesenace16102009-02-03 19:33:06 +00004751 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
Bill Wendling8b8a6362009-01-17 03:56:04 +00004752
Dale Johannesen1c15bf52008-10-21 20:50:01 +00004753 // Add the halves; easiest way is to swap them into another reg first.
Nate Begeman9008ca62009-04-27 18:41:29 +00004754 int ShufMask[2] = { 1, -1 };
4755 SDValue Shuf = DAG.getVectorShuffle(MVT::v2f64, dl, Sub,
4756 DAG.getUNDEF(MVT::v2f64), ShufMask);
Dale Johannesenace16102009-02-03 19:33:06 +00004757 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuf, Sub);
4758 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Add,
Dale Johannesen1c15bf52008-10-21 20:50:01 +00004759 DAG.getIntPtrConstant(0));
4760}
4761
Bill Wendling8b8a6362009-01-17 03:56:04 +00004762// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
4763SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) {
Dale Johannesen6f38cb62009-02-07 19:59:05 +00004764 DebugLoc dl = Op.getDebugLoc();
Bill Wendling8b8a6362009-01-17 03:56:04 +00004765  // FP constant to bias-correct the final result.
4766 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
4767 MVT::f64);
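                                            // Note that OR'ing the 32-bit value x into the mantissa of this 2^52
                                            // constant produces the exact double 2^52 + x; e.g. x = 5 gives bits
                                            // 0x4330000000000005 == 4503599627370501.0, and subtracting the bias
                                            // below recovers 5.0 exactly.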
4768
4769 // Load the 32-bit value into an XMM register.
Dale Johannesenace16102009-02-03 19:33:06 +00004770 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
4771 DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
Bill Wendling8b8a6362009-01-17 03:56:04 +00004772 Op.getOperand(0),
4773 DAG.getIntPtrConstant(0)));
4774
Dale Johannesenace16102009-02-03 19:33:06 +00004775 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
4776 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Load),
Bill Wendling8b8a6362009-01-17 03:56:04 +00004777 DAG.getIntPtrConstant(0));
4778
4779 // Or the load with the bias.
Dale Johannesenace16102009-02-03 19:33:06 +00004780 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
4781 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64,
4782 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
Evan Cheng50c3dfe2009-01-19 08:19:57 +00004783 MVT::v2f64, Load)),
Dale Johannesenace16102009-02-03 19:33:06 +00004784 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64,
4785 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
Evan Cheng50c3dfe2009-01-19 08:19:57 +00004786 MVT::v2f64, Bias)));
Dale Johannesenace16102009-02-03 19:33:06 +00004787 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
4788 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Or),
Bill Wendling8b8a6362009-01-17 03:56:04 +00004789 DAG.getIntPtrConstant(0));
4790
4791 // Subtract the bias.
Dale Johannesenace16102009-02-03 19:33:06 +00004792 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
Bill Wendling8b8a6362009-01-17 03:56:04 +00004793
4794 // Handle final rounding.
Bill Wendling030939c2009-01-17 07:40:19 +00004795 MVT DestVT = Op.getValueType();
4796
4797 if (DestVT.bitsLT(MVT::f64)) {
Dale Johannesenace16102009-02-03 19:33:06 +00004798 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
Bill Wendling030939c2009-01-17 07:40:19 +00004799 DAG.getIntPtrConstant(0));
4800 } else if (DestVT.bitsGT(MVT::f64)) {
Dale Johannesenace16102009-02-03 19:33:06 +00004801 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
Bill Wendling030939c2009-01-17 07:40:19 +00004802 }
4803
                                         4804  // The result is already f64; no further rounding is needed.
4805 return Sub;
Bill Wendling8b8a6362009-01-17 03:56:04 +00004806}
4807
4808SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
Evan Chenga06ec9e2009-01-19 08:08:22 +00004809 SDValue N0 = Op.getOperand(0);
Dale Johannesen6f38cb62009-02-07 19:59:05 +00004810 DebugLoc dl = Op.getDebugLoc();
Bill Wendling8b8a6362009-01-17 03:56:04 +00004811
Evan Chenga06ec9e2009-01-19 08:08:22 +00004812  // Since UINT_TO_FP is legal (it's marked custom), the dag combiner won't
                                         4813  // optimize it to a SINT_TO_FP when the sign bit is known zero, so perform
                                         4814  // the optimization here.
4815 if (DAG.SignBitIsZero(N0))
Dale Johannesenace16102009-02-03 19:33:06 +00004816 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
Evan Chenga06ec9e2009-01-19 08:08:22 +00004817
4818 MVT SrcVT = N0.getValueType();
Bill Wendling8b8a6362009-01-17 03:56:04 +00004819 if (SrcVT == MVT::i64) {
Eli Friedman36df4992009-05-27 00:47:34 +00004820 // We only handle SSE2 f64 target here; caller can expand the rest.
Bill Wendling8b8a6362009-01-17 03:56:04 +00004821 if (Op.getValueType() != MVT::f64 || !X86ScalarSSEf64)
Daniel Dunbar82205572009-05-26 21:27:02 +00004822 return SDValue();
Bill Wendling030939c2009-01-17 07:40:19 +00004823
Bill Wendling8b8a6362009-01-17 03:56:04 +00004824 return LowerUINT_TO_FP_i64(Op, DAG);
Eli Friedman948e95a2009-05-23 09:59:16 +00004825 } else if (SrcVT == MVT::i32 && X86ScalarSSEf64) {
Bill Wendling8b8a6362009-01-17 03:56:04 +00004826 return LowerUINT_TO_FP_i32(Op, DAG);
4827 }
4828
Eli Friedman948e95a2009-05-23 09:59:16 +00004829 assert(SrcVT == MVT::i32 && "Unknown UINT_TO_FP to lower!");
4830
4831 // Make a 64-bit buffer, and use it to build an FILD.
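                                            // On little-endian x86 the 32-bit value goes at offset 0 and a zero
                                            // dword at offset 4, so the slot holds the (non-negative) i64
                                            // equivalent of the u32, e.g. 0xFFFFFFFF becomes the i64 4294967295,
                                            // which FILD converts exactly.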
4832 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
4833 SDValue WordOff = DAG.getConstant(4, getPointerTy());
4834 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
4835 getPointerTy(), StackSlot, WordOff);
4836 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
4837 StackSlot, NULL, 0);
4838 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
4839 OffsetSlot, NULL, 0);
4840 return BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
Bill Wendling8b8a6362009-01-17 03:56:04 +00004841}
4842
Dan Gohman475871a2008-07-27 21:46:04 +00004843std::pair<SDValue,SDValue> X86TargetLowering::
Eli Friedman948e95a2009-05-23 09:59:16 +00004844FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) {
Dale Johannesen6f38cb62009-02-07 19:59:05 +00004845 DebugLoc dl = Op.getDebugLoc();
Eli Friedman948e95a2009-05-23 09:59:16 +00004846
4847 MVT DstTy = Op.getValueType();
4848
4849 if (!IsSigned) {
4850 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
4851 DstTy = MVT::i64;
4852 }
4853
4854 assert(DstTy.getSimpleVT() <= MVT::i64 &&
4855 DstTy.getSimpleVT() >= MVT::i16 &&
Evan Cheng0db9fe62006-04-25 20:13:52 +00004856 "Unknown FP_TO_SINT to lower!");
Evan Cheng0db9fe62006-04-25 20:13:52 +00004857
Dale Johannesen9e3d3ab2007-09-14 22:26:36 +00004858 // These are really Legal.
Eli Friedman948e95a2009-05-23 09:59:16 +00004859 if (DstTy == MVT::i32 &&
Chris Lattner78631162008-01-16 06:24:21 +00004860 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
Dan Gohman475871a2008-07-27 21:46:04 +00004861 return std::make_pair(SDValue(), SDValue());
Dale Johannesen73328d12007-09-19 23:55:34 +00004862 if (Subtarget->is64Bit() &&
Eli Friedman948e95a2009-05-23 09:59:16 +00004863 DstTy == MVT::i64 &&
Eli Friedman36df4992009-05-27 00:47:34 +00004864 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
Dan Gohman475871a2008-07-27 21:46:04 +00004865 return std::make_pair(SDValue(), SDValue());
Dale Johannesen9e3d3ab2007-09-14 22:26:36 +00004866
Evan Cheng87c89352007-10-15 20:11:21 +00004867  // We lower FP->int (i16/i32/i64) into the matching FP_TO_INT*_IN_MEM node,
                                         4868  // followed by a load, all through a temporary stack slot.
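                                            // For example, an f64 -> i64 FP_TO_SINT with the value in an x87
                                            // register roughly becomes
                                            //   fistp qword ptr [slot]
                                            // (with the x87 control word temporarily switched to truncation), and
                                            // the caller then loads the i64 result from the slot.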
4869 MachineFunction &MF = DAG.getMachineFunction();
Eli Friedman948e95a2009-05-23 09:59:16 +00004870 unsigned MemSize = DstTy.getSizeInBits()/8;
Evan Cheng87c89352007-10-15 20:11:21 +00004871 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
Dan Gohman475871a2008-07-27 21:46:04 +00004872 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
Eli Friedman948e95a2009-05-23 09:59:16 +00004873
Evan Cheng0db9fe62006-04-25 20:13:52 +00004874 unsigned Opc;
Eli Friedman948e95a2009-05-23 09:59:16 +00004875 switch (DstTy.getSimpleVT()) {
Chris Lattner27a6c732007-11-24 07:07:01 +00004876 default: assert(0 && "Invalid FP_TO_SINT to lower!");
4877 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
4878 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
4879 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
Evan Cheng0db9fe62006-04-25 20:13:52 +00004880 }
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00004881
Dan Gohman475871a2008-07-27 21:46:04 +00004882 SDValue Chain = DAG.getEntryNode();
4883 SDValue Value = Op.getOperand(0);
Chris Lattner78631162008-01-16 06:24:21 +00004884 if (isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) {
Eli Friedman948e95a2009-05-23 09:59:16 +00004885 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
Dale Johannesenace16102009-02-03 19:33:06 +00004886 Chain = DAG.getStore(Chain, dl, Value, StackSlot,
Dan Gohmana54cf172008-07-11 22:44:52 +00004887 PseudoSourceValue::getFixedStack(SSFI), 0);
Dale Johannesen849f2142007-07-03 00:53:03 +00004888 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
Dan Gohman475871a2008-07-27 21:46:04 +00004889 SDValue Ops[] = {
Chris Lattner5a88b832007-02-25 07:10:00 +00004890 Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType())
4891 };
Dale Johannesenace16102009-02-03 19:33:06 +00004892 Value = DAG.getNode(X86ISD::FLD, dl, Tys, Ops, 3);
Evan Cheng0db9fe62006-04-25 20:13:52 +00004893 Chain = Value.getValue(1);
4894 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
4895 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
4896 }
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00004897
Evan Cheng0db9fe62006-04-25 20:13:52 +00004898 // Build the FP_TO_INT*_IN_MEM
Dan Gohman475871a2008-07-27 21:46:04 +00004899 SDValue Ops[] = { Chain, Value, StackSlot };
Dale Johannesenace16102009-02-03 19:33:06 +00004900 SDValue FIST = DAG.getNode(Opc, dl, MVT::Other, Ops, 3);
Evan Chengd9558e02006-01-06 00:43:03 +00004901
Chris Lattner27a6c732007-11-24 07:07:01 +00004902 return std::make_pair(FIST, StackSlot);
Evan Cheng0db9fe62006-04-25 20:13:52 +00004903}
4904
Dan Gohman475871a2008-07-27 21:46:04 +00004905SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
Eli Friedman23ef1052009-06-06 03:57:58 +00004906 if (Op.getValueType().isVector()) {
4907 if (Op.getValueType() == MVT::v2i32 &&
4908 Op.getOperand(0).getValueType() == MVT::v2f64) {
4909 return Op;
4910 }
4911 return SDValue();
4912 }
4913
Eli Friedman948e95a2009-05-23 09:59:16 +00004914 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, true);
Dan Gohman475871a2008-07-27 21:46:04 +00004915 SDValue FIST = Vals.first, StackSlot = Vals.second;
Eli Friedman36df4992009-05-27 00:47:34 +00004916 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
4917 if (FIST.getNode() == 0) return Op;
Scott Michelfdc40a02009-02-17 22:15:04 +00004918
Chris Lattner27a6c732007-11-24 07:07:01 +00004919 // Load the result.
Dale Johannesen6f38cb62009-02-07 19:59:05 +00004920 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
Dale Johannesenace16102009-02-03 19:33:06 +00004921 FIST, StackSlot, NULL, 0);
Chris Lattner27a6c732007-11-24 07:07:01 +00004922}
4923
Eli Friedman948e95a2009-05-23 09:59:16 +00004924SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) {
4925 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, false);
4926 SDValue FIST = Vals.first, StackSlot = Vals.second;
4927 assert(FIST.getNode() && "Unexpected failure");
4928
4929 // Load the result.
4930 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
4931 FIST, StackSlot, NULL, 0);
4932}
4933
Dan Gohman475871a2008-07-27 21:46:04 +00004934SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) {
Dale Johannesen6f38cb62009-02-07 19:59:05 +00004935 DebugLoc dl = Op.getDebugLoc();
Duncan Sands83ec4b62008-06-06 12:08:01 +00004936 MVT VT = Op.getValueType();
4937 MVT EltVT = VT;
4938 if (VT.isVector())
4939 EltVT = VT.getVectorElementType();
Evan Cheng0db9fe62006-04-25 20:13:52 +00004940 std::vector<Constant*> CV;
Dan Gohman20382522007-07-10 00:05:58 +00004941 if (EltVT == MVT::f64) {
Chris Lattner02a260a2008-04-20 00:41:09 +00004942 Constant *C = ConstantFP::get(APFloat(APInt(64, ~(1ULL << 63))));
Dan Gohman20382522007-07-10 00:05:58 +00004943 CV.push_back(C);
4944 CV.push_back(C);
Evan Cheng0db9fe62006-04-25 20:13:52 +00004945 } else {
Chris Lattner02a260a2008-04-20 00:41:09 +00004946 Constant *C = ConstantFP::get(APFloat(APInt(32, ~(1U << 31))));
Dan Gohman20382522007-07-10 00:05:58 +00004947 CV.push_back(C);
4948 CV.push_back(C);
4949 CV.push_back(C);
4950 CV.push_back(C);
Evan Cheng0db9fe62006-04-25 20:13:52 +00004951 }
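                                            // The constant built above clears only the sign bit
                                            // (0x7FFFFFFFFFFFFFFF per f64 lane, 0x7FFFFFFF per f32 lane), so the
                                            // FAND below computes fabs() lane-wise.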
Dan Gohmand3006222007-07-27 17:16:43 +00004952 Constant *C = ConstantVector::get(CV);
Evan Cheng1606e8e2009-03-13 07:51:59 +00004953 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
Dale Johannesenace16102009-02-03 19:33:06 +00004954 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
Dan Gohman3069b872008-02-07 18:41:25 +00004955 PseudoSourceValue::getConstantPool(), 0,
Dan Gohmand3006222007-07-27 17:16:43 +00004956 false, 16);
Dale Johannesenace16102009-02-03 19:33:06 +00004957 return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask);
Evan Cheng0db9fe62006-04-25 20:13:52 +00004958}
4959
Dan Gohman475871a2008-07-27 21:46:04 +00004960SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) {
Dale Johannesen6f38cb62009-02-07 19:59:05 +00004961 DebugLoc dl = Op.getDebugLoc();
Duncan Sands83ec4b62008-06-06 12:08:01 +00004962 MVT VT = Op.getValueType();
4963 MVT EltVT = VT;
Evan Chengd4d01b72007-07-19 23:36:01 +00004964 unsigned EltNum = 1;
Duncan Sands83ec4b62008-06-06 12:08:01 +00004965 if (VT.isVector()) {
4966 EltVT = VT.getVectorElementType();
4967 EltNum = VT.getVectorNumElements();
Evan Chengd4d01b72007-07-19 23:36:01 +00004968 }
Evan Cheng0db9fe62006-04-25 20:13:52 +00004969 std::vector<Constant*> CV;
Dan Gohman20382522007-07-10 00:05:58 +00004970 if (EltVT == MVT::f64) {
Chris Lattner02a260a2008-04-20 00:41:09 +00004971 Constant *C = ConstantFP::get(APFloat(APInt(64, 1ULL << 63)));
Dan Gohman20382522007-07-10 00:05:58 +00004972 CV.push_back(C);
4973 CV.push_back(C);
Evan Cheng0db9fe62006-04-25 20:13:52 +00004974 } else {
Chris Lattner02a260a2008-04-20 00:41:09 +00004975 Constant *C = ConstantFP::get(APFloat(APInt(32, 1U << 31)));
Dan Gohman20382522007-07-10 00:05:58 +00004976 CV.push_back(C);
4977 CV.push_back(C);
4978 CV.push_back(C);
4979 CV.push_back(C);
Evan Cheng0db9fe62006-04-25 20:13:52 +00004980 }
Dan Gohmand3006222007-07-27 17:16:43 +00004981 Constant *C = ConstantVector::get(CV);
Evan Cheng1606e8e2009-03-13 07:51:59 +00004982 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
Dale Johannesenace16102009-02-03 19:33:06 +00004983 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
Dan Gohman3069b872008-02-07 18:41:25 +00004984 PseudoSourceValue::getConstantPool(), 0,
Dan Gohmand3006222007-07-27 17:16:43 +00004985 false, 16);
Duncan Sands83ec4b62008-06-06 12:08:01 +00004986 if (VT.isVector()) {
Dale Johannesenace16102009-02-03 19:33:06 +00004987 return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
4988 DAG.getNode(ISD::XOR, dl, MVT::v2i64,
Scott Michelfdc40a02009-02-17 22:15:04 +00004989 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64,
Dale Johannesenace16102009-02-03 19:33:06 +00004990 Op.getOperand(0)),
4991 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, Mask)));
Evan Chengd4d01b72007-07-19 23:36:01 +00004992 } else {
Dale Johannesenace16102009-02-03 19:33:06 +00004993 return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask);
Evan Chengd4d01b72007-07-19 23:36:01 +00004994 }
Evan Cheng0db9fe62006-04-25 20:13:52 +00004995}
4996
Dan Gohman475871a2008-07-27 21:46:04 +00004997SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
4998 SDValue Op0 = Op.getOperand(0);
4999 SDValue Op1 = Op.getOperand(1);
Dale Johannesen6f38cb62009-02-07 19:59:05 +00005000 DebugLoc dl = Op.getDebugLoc();
Duncan Sands83ec4b62008-06-06 12:08:01 +00005001 MVT VT = Op.getValueType();
5002 MVT SrcVT = Op1.getValueType();
Evan Cheng73d6cf12007-01-05 21:37:56 +00005003
5004 // If second operand is smaller, extend it first.
Duncan Sands8e4eb092008-06-08 20:54:56 +00005005 if (SrcVT.bitsLT(VT)) {
Dale Johannesenace16102009-02-03 19:33:06 +00005006 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
Evan Cheng73d6cf12007-01-05 21:37:56 +00005007 SrcVT = VT;
5008 }
Dale Johannesen61c7ef32007-10-21 01:07:44 +00005009 // And if it is bigger, shrink it first.
Duncan Sands8e4eb092008-06-08 20:54:56 +00005010 if (SrcVT.bitsGT(VT)) {
Dale Johannesenace16102009-02-03 19:33:06 +00005011 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
Dale Johannesen61c7ef32007-10-21 01:07:44 +00005012 SrcVT = VT;
Dale Johannesen61c7ef32007-10-21 01:07:44 +00005013 }
5014
5015 // At this point the operands and the result should have the same
5016 // type, and that won't be f80 since that is not custom lowered.
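                                            // The sequence below is: sign = Op1 & sign-mask,
                                            // magnitude = Op0 & ~sign-mask, result = magnitude | sign; e.g.
                                            // copysign(3.0, -0.5) keeps 3.0's magnitude and -0.5's sign,
                                            // giving -3.0.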
Evan Cheng73d6cf12007-01-05 21:37:56 +00005017
Evan Cheng68c47cb2007-01-05 07:55:56 +00005018 // First get the sign bit of second operand.
5019 std::vector<Constant*> CV;
5020 if (SrcVT == MVT::f64) {
Chris Lattner02a260a2008-04-20 00:41:09 +00005021 CV.push_back(ConstantFP::get(APFloat(APInt(64, 1ULL << 63))));
5022 CV.push_back(ConstantFP::get(APFloat(APInt(64, 0))));
Evan Cheng68c47cb2007-01-05 07:55:56 +00005023 } else {
Chris Lattner02a260a2008-04-20 00:41:09 +00005024 CV.push_back(ConstantFP::get(APFloat(APInt(32, 1U << 31))));
5025 CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
5026 CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
5027 CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
Evan Cheng68c47cb2007-01-05 07:55:56 +00005028 }
Dan Gohmand3006222007-07-27 17:16:43 +00005029 Constant *C = ConstantVector::get(CV);
Evan Cheng1606e8e2009-03-13 07:51:59 +00005030 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
Dale Johannesenace16102009-02-03 19:33:06 +00005031 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
Dan Gohman3069b872008-02-07 18:41:25 +00005032 PseudoSourceValue::getConstantPool(), 0,
Dan Gohmand3006222007-07-27 17:16:43 +00005033 false, 16);
Dale Johannesenace16102009-02-03 19:33:06 +00005034 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
Evan Cheng68c47cb2007-01-05 07:55:56 +00005035
5036 // Shift sign bit right or left if the two operands have different types.
Duncan Sands8e4eb092008-06-08 20:54:56 +00005037 if (SrcVT.bitsGT(VT)) {
Evan Cheng68c47cb2007-01-05 07:55:56 +00005038 // Op0 is MVT::f32, Op1 is MVT::f64.
Dale Johannesenace16102009-02-03 19:33:06 +00005039 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit);
5040 SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit,
Evan Cheng68c47cb2007-01-05 07:55:56 +00005041 DAG.getConstant(32, MVT::i32));
Dale Johannesenace16102009-02-03 19:33:06 +00005042 SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, SignBit);
5043 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit,
Chris Lattner0bd48932008-01-17 07:00:52 +00005044 DAG.getIntPtrConstant(0));
Evan Cheng68c47cb2007-01-05 07:55:56 +00005045 }
5046
Evan Cheng73d6cf12007-01-05 21:37:56 +00005047 // Clear first operand sign bit.
5048 CV.clear();
5049 if (VT == MVT::f64) {
Chris Lattner02a260a2008-04-20 00:41:09 +00005050 CV.push_back(ConstantFP::get(APFloat(APInt(64, ~(1ULL << 63)))));
5051 CV.push_back(ConstantFP::get(APFloat(APInt(64, 0))));
Evan Cheng73d6cf12007-01-05 21:37:56 +00005052 } else {
Chris Lattner02a260a2008-04-20 00:41:09 +00005053 CV.push_back(ConstantFP::get(APFloat(APInt(32, ~(1U << 31)))));
5054 CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
5055 CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
5056 CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
Evan Cheng73d6cf12007-01-05 21:37:56 +00005057 }
Dan Gohmand3006222007-07-27 17:16:43 +00005058 C = ConstantVector::get(CV);
Evan Cheng1606e8e2009-03-13 07:51:59 +00005059 CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
Dale Johannesenace16102009-02-03 19:33:06 +00005060 SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
Dan Gohman3069b872008-02-07 18:41:25 +00005061 PseudoSourceValue::getConstantPool(), 0,
Dan Gohmand3006222007-07-27 17:16:43 +00005062 false, 16);
Dale Johannesenace16102009-02-03 19:33:06 +00005063 SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2);
Evan Cheng73d6cf12007-01-05 21:37:56 +00005064
5065 // Or the value with the sign bit.
Dale Johannesenace16102009-02-03 19:33:06 +00005066 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
Evan Cheng68c47cb2007-01-05 07:55:56 +00005067}
5068
Dan Gohman076aee32009-03-04 19:44:21 +00005069/// Emit nodes that will be selected as "test Op0,Op0", or something
5070/// equivalent.
Dan Gohman31125812009-03-07 01:58:32 +00005071SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
5072 SelectionDAG &DAG) {
Dan Gohman076aee32009-03-04 19:44:21 +00005073 DebugLoc dl = Op.getDebugLoc();
5074
Dan Gohman31125812009-03-07 01:58:32 +00005075 // CF and OF aren't always set the way we want. Determine which
5076 // of these we need.
5077 bool NeedCF = false;
5078 bool NeedOF = false;
5079 switch (X86CC) {
5080 case X86::COND_A: case X86::COND_AE:
5081 case X86::COND_B: case X86::COND_BE:
5082 NeedCF = true;
5083 break;
5084 case X86::COND_G: case X86::COND_GE:
5085 case X86::COND_L: case X86::COND_LE:
5086 case X86::COND_O: case X86::COND_NO:
5087 NeedOF = true;
5088 break;
5089 default: break;
5090 }
5091
Dan Gohman076aee32009-03-04 19:44:21 +00005092 // See if we can use the EFLAGS value from the operand instead of
Dan Gohman31125812009-03-07 01:58:32 +00005093 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
5094 // we prove that the arithmetic won't overflow, we can't use OF or CF.
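                                            // For example, with x == 0xFFFFFFFF, (add x, 1) sets CF=1 while
                                            // "test result,result" would leave CF=0, so a COND_B user cannot
                                            // simply reuse the add's flags; that is why NeedCF/NeedOF force the
                                            // CMP-with-zero fallback at the end of this function.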
5095 if (Op.getResNo() == 0 && !NeedOF && !NeedCF) {
Dan Gohman076aee32009-03-04 19:44:21 +00005096 unsigned Opcode = 0;
Dan Gohman51bb4742009-03-05 21:29:28 +00005097 unsigned NumOperands = 0;
Dan Gohman076aee32009-03-04 19:44:21 +00005098 switch (Op.getNode()->getOpcode()) {
5099 case ISD::ADD:
5100 // Due to an isel shortcoming, be conservative if this add is likely to
5101 // be selected as part of a load-modify-store instruction. When the root
5102 // node in a match is a store, isel doesn't know how to remap non-chain
5103 // non-flag uses of other nodes in the match, such as the ADD in this
5104 // case. This leads to the ADD being left around and reselected, with
5105 // the result being two adds in the output.
5106 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
5107 UE = Op.getNode()->use_end(); UI != UE; ++UI)
5108 if (UI->getOpcode() == ISD::STORE)
5109 goto default_case;
Dan Gohman076aee32009-03-04 19:44:21 +00005110 if (ConstantSDNode *C =
Dan Gohman4bfcf2a2009-03-05 19:32:48 +00005111 dyn_cast<ConstantSDNode>(Op.getNode()->getOperand(1))) {
5112 // An add of one will be selected as an INC.
Dan Gohman076aee32009-03-04 19:44:21 +00005113 if (C->getAPIntValue() == 1) {
5114 Opcode = X86ISD::INC;
Dan Gohman51bb4742009-03-05 21:29:28 +00005115 NumOperands = 1;
Dan Gohman076aee32009-03-04 19:44:21 +00005116 break;
5117 }
Dan Gohman4bfcf2a2009-03-05 19:32:48 +00005118 // An add of negative one (subtract of one) will be selected as a DEC.
5119 if (C->getAPIntValue().isAllOnesValue()) {
5120 Opcode = X86ISD::DEC;
Dan Gohman51bb4742009-03-05 21:29:28 +00005121 NumOperands = 1;
Dan Gohman4bfcf2a2009-03-05 19:32:48 +00005122 break;
5123 }
5124 }
Dan Gohman076aee32009-03-04 19:44:21 +00005125 // Otherwise use a regular EFLAGS-setting add.
5126 Opcode = X86ISD::ADD;
Dan Gohman51bb4742009-03-05 21:29:28 +00005127 NumOperands = 2;
Dan Gohman076aee32009-03-04 19:44:21 +00005128 break;
5129 case ISD::SUB:
5130 // Due to the ISEL shortcoming noted above, be conservative if this sub is
5131 // likely to be selected as part of a load-modify-store instruction.
5132 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
5133 UE = Op.getNode()->use_end(); UI != UE; ++UI)
5134 if (UI->getOpcode() == ISD::STORE)
5135 goto default_case;
Dan Gohman076aee32009-03-04 19:44:21 +00005136 // Otherwise use a regular EFLAGS-setting sub.
5137 Opcode = X86ISD::SUB;
Dan Gohman51bb4742009-03-05 21:29:28 +00005138 NumOperands = 2;
Dan Gohman076aee32009-03-04 19:44:21 +00005139 break;
5140 case X86ISD::ADD:
5141 case X86ISD::SUB:
5142 case X86ISD::INC:
5143 case X86ISD::DEC:
5144 return SDValue(Op.getNode(), 1);
5145 default:
5146 default_case:
5147 break;
5148 }
5149 if (Opcode != 0) {
Dan Gohmanfc166572009-04-09 23:54:40 +00005150 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
Dan Gohman076aee32009-03-04 19:44:21 +00005151 SmallVector<SDValue, 4> Ops;
Dan Gohman31125812009-03-07 01:58:32 +00005152 for (unsigned i = 0; i != NumOperands; ++i)
Dan Gohman076aee32009-03-04 19:44:21 +00005153 Ops.push_back(Op.getOperand(i));
Dan Gohmanfc166572009-04-09 23:54:40 +00005154 SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands);
Dan Gohman076aee32009-03-04 19:44:21 +00005155 DAG.ReplaceAllUsesWith(Op, New);
5156 return SDValue(New.getNode(), 1);
5157 }
5158 }
5159
5160 // Otherwise just emit a CMP with 0, which is the TEST pattern.
5161 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
5162 DAG.getConstant(0, Op.getValueType()));
5163}
5164
5165/// Emit nodes that will be selected as "cmp Op0,Op1", or something
5166/// equivalent.
Dan Gohman31125812009-03-07 01:58:32 +00005167SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
5168 SelectionDAG &DAG) {
Dan Gohman076aee32009-03-04 19:44:21 +00005169 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1))
5170 if (C->getAPIntValue() == 0)
Dan Gohman31125812009-03-07 01:58:32 +00005171 return EmitTest(Op0, X86CC, DAG);
Dan Gohman076aee32009-03-04 19:44:21 +00005172
5173 DebugLoc dl = Op0.getDebugLoc();
5174 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
5175}
5176
Dan Gohman475871a2008-07-27 21:46:04 +00005177SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) {
Evan Cheng0488db92007-09-25 01:57:46 +00005178 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
Dan Gohman475871a2008-07-27 21:46:04 +00005179 SDValue Op0 = Op.getOperand(0);
5180 SDValue Op1 = Op.getOperand(1);
Dale Johannesen6f38cb62009-02-07 19:59:05 +00005181 DebugLoc dl = Op.getDebugLoc();
Chris Lattnere55484e2008-12-25 05:34:37 +00005182 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
Scott Michelfdc40a02009-02-17 22:15:04 +00005183
Dan Gohmane5af2d32009-01-29 01:59:02 +00005184 // Lower (X & (1 << N)) == 0 to BT(X, N).
5185 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
5186 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
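                                            // For example, "(x & (1 << 5)) == 0" becomes roughly
                                            //   bt    x, 5
                                            //   setae ...        ; CF == 0 means the bit was clear
                                            // while the != 0 form uses setb, matching the COND_AE / COND_B
                                            // choice below.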
Dan Gohman286575c2009-01-13 23:25:30 +00005187 if (Op0.getOpcode() == ISD::AND &&
5188 Op0.hasOneUse() &&
5189 Op1.getOpcode() == ISD::Constant &&
Dan Gohmane5af2d32009-01-29 01:59:02 +00005190 cast<ConstantSDNode>(Op1)->getZExtValue() == 0 &&
Chris Lattnere55484e2008-12-25 05:34:37 +00005191 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
Dan Gohmane5af2d32009-01-29 01:59:02 +00005192 SDValue LHS, RHS;
5193 if (Op0.getOperand(1).getOpcode() == ISD::SHL) {
5194 if (ConstantSDNode *Op010C =
5195 dyn_cast<ConstantSDNode>(Op0.getOperand(1).getOperand(0)))
5196 if (Op010C->getZExtValue() == 1) {
5197 LHS = Op0.getOperand(0);
5198 RHS = Op0.getOperand(1).getOperand(1);
5199 }
5200 } else if (Op0.getOperand(0).getOpcode() == ISD::SHL) {
5201 if (ConstantSDNode *Op000C =
5202 dyn_cast<ConstantSDNode>(Op0.getOperand(0).getOperand(0)))
5203 if (Op000C->getZExtValue() == 1) {
5204 LHS = Op0.getOperand(1);
5205 RHS = Op0.getOperand(0).getOperand(1);
5206 }
5207 } else if (Op0.getOperand(1).getOpcode() == ISD::Constant) {
5208 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op0.getOperand(1));
5209 SDValue AndLHS = Op0.getOperand(0);
5210 if (AndRHS->getZExtValue() == 1 && AndLHS.getOpcode() == ISD::SRL) {
5211 LHS = AndLHS.getOperand(0);
5212 RHS = AndLHS.getOperand(1);
5213 }
5214 }
Evan Cheng0488db92007-09-25 01:57:46 +00005215
Dan Gohmane5af2d32009-01-29 01:59:02 +00005216 if (LHS.getNode()) {
Chris Lattnere55484e2008-12-25 05:34:37 +00005217 // If LHS is i8, promote it to i16 with any_extend. There is no i8 BT
5218 // instruction. Since the shift amount is in-range-or-undefined, we know
5219 // that doing a bittest on the i16 value is ok. We extend to i32 because
5220 // the encoding for the i16 version is larger than the i32 version.
5221 if (LHS.getValueType() == MVT::i8)
Dale Johannesenace16102009-02-03 19:33:06 +00005222 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
Chris Lattnere55484e2008-12-25 05:34:37 +00005223
5224 // If the operand types disagree, extend the shift amount to match. Since
5225 // BT ignores high bits (like shifts) we can use anyextend.
5226 if (LHS.getValueType() != RHS.getValueType())
Dale Johannesenace16102009-02-03 19:33:06 +00005227 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
Dan Gohmane5af2d32009-01-29 01:59:02 +00005228
Dale Johannesenace16102009-02-03 19:33:06 +00005229 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
Dan Gohman653456c2009-01-07 00:15:08 +00005230 unsigned Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
Dale Johannesenace16102009-02-03 19:33:06 +00005231 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
Chris Lattnere55484e2008-12-25 05:34:37 +00005232 DAG.getConstant(Cond, MVT::i8), BT);
5233 }
5234 }
5235
5236 bool isFP = Op.getOperand(1).getValueType().isFloatingPoint();
5237 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
Scott Michelfdc40a02009-02-17 22:15:04 +00005238
Dan Gohman31125812009-03-07 01:58:32 +00005239 SDValue Cond = EmitCmp(Op0, Op1, X86CC, DAG);
Dale Johannesenace16102009-02-03 19:33:06 +00005240 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
Chris Lattner43287082008-12-24 00:11:37 +00005241 DAG.getConstant(X86CC, MVT::i8), Cond);
Evan Cheng0488db92007-09-25 01:57:46 +00005242}
5243
Dan Gohman475871a2008-07-27 21:46:04 +00005244SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
5245 SDValue Cond;
5246 SDValue Op0 = Op.getOperand(0);
5247 SDValue Op1 = Op.getOperand(1);
5248 SDValue CC = Op.getOperand(2);
Nate Begeman30a0de92008-07-17 16:51:19 +00005249 MVT VT = Op.getValueType();
5250 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
5251 bool isFP = Op.getOperand(1).getValueType().isFloatingPoint();
Dale Johannesen6f38cb62009-02-07 19:59:05 +00005252 DebugLoc dl = Op.getDebugLoc();
Nate Begeman30a0de92008-07-17 16:51:19 +00005253
5254 if (isFP) {
5255 unsigned SSECC = 8;
Evan Chenge9d50352008-08-05 22:19:15 +00005256 MVT VT0 = Op0.getValueType();
5257 assert(VT0 == MVT::v4f32 || VT0 == MVT::v2f64);
5258 unsigned Opc = VT0 == MVT::v4f32 ? X86ISD::CMPPS : X86ISD::CMPPD;
Nate Begeman30a0de92008-07-17 16:51:19 +00005259 bool Swap = false;
5260
5261 switch (SetCCOpcode) {
5262 default: break;
Nate Begemanfb8ead02008-07-25 19:05:58 +00005263 case ISD::SETOEQ:
Nate Begeman30a0de92008-07-17 16:51:19 +00005264 case ISD::SETEQ: SSECC = 0; break;
Scott Michelfdc40a02009-02-17 22:15:04 +00005265 case ISD::SETOGT:
Nate Begeman30a0de92008-07-17 16:51:19 +00005266 case ISD::SETGT: Swap = true; // Fallthrough
5267 case ISD::SETLT:
5268 case ISD::SETOLT: SSECC = 1; break;
5269 case ISD::SETOGE:
5270 case ISD::SETGE: Swap = true; // Fallthrough
5271 case ISD::SETLE:
5272 case ISD::SETOLE: SSECC = 2; break;
5273 case ISD::SETUO: SSECC = 3; break;
Nate Begemanfb8ead02008-07-25 19:05:58 +00005274 case ISD::SETUNE:
Nate Begeman30a0de92008-07-17 16:51:19 +00005275 case ISD::SETNE: SSECC = 4; break;
5276 case ISD::SETULE: Swap = true;
5277 case ISD::SETUGE: SSECC = 5; break;
5278 case ISD::SETULT: Swap = true;
5279 case ISD::SETUGT: SSECC = 6; break;
5280 case ISD::SETO: SSECC = 7; break;
5281 }
5282 if (Swap)
5283 std::swap(Op0, Op1);
5284
Nate Begemanfb8ead02008-07-25 19:05:58 +00005285    // For the two predicates that have no single SSE compare encoding, emit
                                         5285    // two comparisons.
Nate Begeman30a0de92008-07-17 16:51:19 +00005286 if (SSECC == 8) {
Nate Begemanfb8ead02008-07-25 19:05:58 +00005287 if (SetCCOpcode == ISD::SETUEQ) {
Dan Gohman475871a2008-07-27 21:46:04 +00005288 SDValue UNORD, EQ;
Dale Johannesenace16102009-02-03 19:33:06 +00005289 UNORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(3, MVT::i8));
5290 EQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(0, MVT::i8));
5291 return DAG.getNode(ISD::OR, dl, VT, UNORD, EQ);
Nate Begemanfb8ead02008-07-25 19:05:58 +00005292 }
5293 else if (SetCCOpcode == ISD::SETONE) {
Dan Gohman475871a2008-07-27 21:46:04 +00005294 SDValue ORD, NEQ;
Dale Johannesenace16102009-02-03 19:33:06 +00005295 ORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(7, MVT::i8));
5296 NEQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(4, MVT::i8));
5297 return DAG.getNode(ISD::AND, dl, VT, ORD, NEQ);
Nate Begemanfb8ead02008-07-25 19:05:58 +00005298 }
5299 assert(0 && "Illegal FP comparison");
Nate Begeman30a0de92008-07-17 16:51:19 +00005300 }
5301 // Handle all other FP comparisons here.
Dale Johannesenace16102009-02-03 19:33:06 +00005302 return DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(SSECC, MVT::i8));
Nate Begeman30a0de92008-07-17 16:51:19 +00005303 }
Scott Michelfdc40a02009-02-17 22:15:04 +00005304
Nate Begeman30a0de92008-07-17 16:51:19 +00005305 // We are handling one of the integer comparisons here. Since SSE only has
5306 // GT and EQ comparisons for integer, swapping operands and multiple
5307 // operations may be required for some comparisons.
5308 unsigned Opc = 0, EQOpc = 0, GTOpc = 0;
5309 bool Swap = false, Invert = false, FlipSigns = false;
Scott Michelfdc40a02009-02-17 22:15:04 +00005310
Nate Begeman30a0de92008-07-17 16:51:19 +00005311 switch (VT.getSimpleVT()) {
5312 default: break;
5313 case MVT::v16i8: EQOpc = X86ISD::PCMPEQB; GTOpc = X86ISD::PCMPGTB; break;
5314 case MVT::v8i16: EQOpc = X86ISD::PCMPEQW; GTOpc = X86ISD::PCMPGTW; break;
5315 case MVT::v4i32: EQOpc = X86ISD::PCMPEQD; GTOpc = X86ISD::PCMPGTD; break;
5316 case MVT::v2i64: EQOpc = X86ISD::PCMPEQQ; GTOpc = X86ISD::PCMPGTQ; break;
5317 }
Scott Michelfdc40a02009-02-17 22:15:04 +00005318
Nate Begeman30a0de92008-07-17 16:51:19 +00005319 switch (SetCCOpcode) {
5320 default: break;
5321 case ISD::SETNE: Invert = true;
5322 case ISD::SETEQ: Opc = EQOpc; break;
5323 case ISD::SETLT: Swap = true;
5324 case ISD::SETGT: Opc = GTOpc; break;
5325 case ISD::SETGE: Swap = true;
5326 case ISD::SETLE: Opc = GTOpc; Invert = true; break;
5327 case ISD::SETULT: Swap = true;
5328 case ISD::SETUGT: Opc = GTOpc; FlipSigns = true; break;
5329 case ISD::SETUGE: Swap = true;
5330 case ISD::SETULE: Opc = GTOpc; FlipSigns = true; Invert = true; break;
5331 }
5332 if (Swap)
5333 std::swap(Op0, Op1);
Scott Michelfdc40a02009-02-17 22:15:04 +00005334
Nate Begeman30a0de92008-07-17 16:51:19 +00005335 // Since SSE has no unsigned integer comparisons, we need to flip the sign
5336 // bits of the inputs before performing those operations.
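                                            // For example, a v4i32 SETUGT is done by XOR'ing each lane of both
                                            // operands with 0x80000000 and then using the signed PCMPGTD, since
                                            // ugt(a, b) == sgt(a ^ 0x80000000, b ^ 0x80000000).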
5337 if (FlipSigns) {
5338 MVT EltVT = VT.getVectorElementType();
Duncan Sandsb0d5cdd2009-02-01 18:06:53 +00005339 SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()),
5340 EltVT);
Dan Gohman475871a2008-07-27 21:46:04 +00005341 std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit);
Evan Chenga87008d2009-02-25 22:49:59 +00005342 SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &SignBits[0],
5343 SignBits.size());
Dale Johannesenace16102009-02-03 19:33:06 +00005344 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SignVec);
5345 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SignVec);
Nate Begeman30a0de92008-07-17 16:51:19 +00005346 }
Scott Michelfdc40a02009-02-17 22:15:04 +00005347
Dale Johannesenace16102009-02-03 19:33:06 +00005348 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
Nate Begeman30a0de92008-07-17 16:51:19 +00005349
5350 // If the logical-not of the result is required, perform that now.
Bob Wilson4c245462009-01-22 17:39:32 +00005351 if (Invert)
Dale Johannesenace16102009-02-03 19:33:06 +00005352 Result = DAG.getNOT(dl, Result, VT);
Bob Wilson4c245462009-01-22 17:39:32 +00005353
Nate Begeman30a0de92008-07-17 16:51:19 +00005354 return Result;
5355}
Evan Cheng0488db92007-09-25 01:57:46 +00005356
Evan Cheng370e5342008-12-03 08:38:43 +00005357// isX86LogicalCmp - Return true if opcode is a X86 logical comparison.
Dan Gohman076aee32009-03-04 19:44:21 +00005358static bool isX86LogicalCmp(SDValue Op) {
5359 unsigned Opc = Op.getNode()->getOpcode();
5360 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI)
5361 return true;
5362 if (Op.getResNo() == 1 &&
5363 (Opc == X86ISD::ADD ||
5364 Opc == X86ISD::SUB ||
5365 Opc == X86ISD::SMUL ||
5366 Opc == X86ISD::UMUL ||
5367 Opc == X86ISD::INC ||
5368 Opc == X86ISD::DEC))
5369 return true;
5370
5371 return false;
Evan Cheng370e5342008-12-03 08:38:43 +00005372}
5373
Dan Gohman475871a2008-07-27 21:46:04 +00005374SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) {
Evan Cheng734503b2006-09-11 02:19:56 +00005375 bool addTest = true;
Dan Gohman475871a2008-07-27 21:46:04 +00005376 SDValue Cond = Op.getOperand(0);
Dale Johannesen6f38cb62009-02-07 19:59:05 +00005377 DebugLoc dl = Op.getDebugLoc();
Dan Gohman475871a2008-07-27 21:46:04 +00005378 SDValue CC;
Evan Cheng9bba8942006-01-26 02:13:10 +00005379
Evan Cheng734503b2006-09-11 02:19:56 +00005380 if (Cond.getOpcode() == ISD::SETCC)
Evan Chenge5f62042007-09-29 00:00:36 +00005381 Cond = LowerSETCC(Cond, DAG);
Evan Cheng734503b2006-09-11 02:19:56 +00005382
Evan Cheng3f41d662007-10-08 22:16:29 +00005383 // If condition flag is set by a X86ISD::CMP, then use it as the condition
5384 // setting operand in place of the X86ISD::SETCC.
Evan Cheng734503b2006-09-11 02:19:56 +00005385 if (Cond.getOpcode() == X86ISD::SETCC) {
5386 CC = Cond.getOperand(0);
5387
Dan Gohman475871a2008-07-27 21:46:04 +00005388 SDValue Cmp = Cond.getOperand(1);
Evan Cheng734503b2006-09-11 02:19:56 +00005389 unsigned Opc = Cmp.getOpcode();
Duncan Sands83ec4b62008-06-06 12:08:01 +00005390 MVT VT = Op.getValueType();
Scott Michelfdc40a02009-02-17 22:15:04 +00005391
Evan Cheng3f41d662007-10-08 22:16:29 +00005392 bool IllegalFPCMov = false;
Duncan Sands83ec4b62008-06-06 12:08:01 +00005393 if (VT.isFloatingPoint() && !VT.isVector() &&
Chris Lattner78631162008-01-16 06:24:21 +00005394 !isScalarFPTypeInSSEReg(VT)) // FPStack?
Dan Gohman7810bfe2008-09-26 21:54:37 +00005395 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
Scott Michelfdc40a02009-02-17 22:15:04 +00005396
Chris Lattnerd1980a52009-03-12 06:52:53 +00005397 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
5398 Opc == X86ISD::BT) { // FIXME
Evan Cheng3f41d662007-10-08 22:16:29 +00005399 Cond = Cmp;
Evan Cheng0488db92007-09-25 01:57:46 +00005400 addTest = false;
5401 }
5402 }
5403
5404 if (addTest) {
5405 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
Dan Gohman31125812009-03-07 01:58:32 +00005406 Cond = EmitTest(Cond, X86::COND_NE, DAG);
Evan Cheng0488db92007-09-25 01:57:46 +00005407 }
5408
Dan Gohmanfc166572009-04-09 23:54:40 +00005409 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Flag);
Dan Gohman475871a2008-07-27 21:46:04 +00005410 SmallVector<SDValue, 4> Ops;
Evan Cheng0488db92007-09-25 01:57:46 +00005411 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
5412 // condition is true.
5413 Ops.push_back(Op.getOperand(2));
5414 Ops.push_back(Op.getOperand(1));
5415 Ops.push_back(CC);
5416 Ops.push_back(Cond);
Dan Gohmanfc166572009-04-09 23:54:40 +00005417 return DAG.getNode(X86ISD::CMOV, dl, VTs, &Ops[0], Ops.size());
Evan Cheng0488db92007-09-25 01:57:46 +00005418}
5419
Evan Cheng370e5342008-12-03 08:38:43 +00005420// isAndOrOfSetCCs - Return true if node is an ISD::AND or
5421// ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
5422// from the AND / OR.
5423static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
5424 Opc = Op.getOpcode();
5425 if (Opc != ISD::OR && Opc != ISD::AND)
5426 return false;
5427 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
5428 Op.getOperand(0).hasOneUse() &&
5429 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
5430 Op.getOperand(1).hasOneUse());
5431}
5432
Evan Cheng961d6d42009-02-02 08:19:07 +00005433// isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC
                                         5434// and the constant 1, where the SETCC node has a single use.
Evan Cheng67ad9db2009-02-02 08:07:36 +00005435static bool isXor1OfSetCC(SDValue Op) {
5436 if (Op.getOpcode() != ISD::XOR)
5437 return false;
5438 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
5439 if (N1C && N1C->getAPIntValue() == 1) {
5440 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
5441 Op.getOperand(0).hasOneUse();
5442 }
5443 return false;
5444}
5445
Dan Gohman475871a2008-07-27 21:46:04 +00005446SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) {
Evan Cheng734503b2006-09-11 02:19:56 +00005447 bool addTest = true;
Dan Gohman475871a2008-07-27 21:46:04 +00005448 SDValue Chain = Op.getOperand(0);
5449 SDValue Cond = Op.getOperand(1);
5450 SDValue Dest = Op.getOperand(2);
Dale Johannesen6f38cb62009-02-07 19:59:05 +00005451 DebugLoc dl = Op.getDebugLoc();
Dan Gohman475871a2008-07-27 21:46:04 +00005452 SDValue CC;
Evan Cheng734503b2006-09-11 02:19:56 +00005453
Evan Cheng0db9fe62006-04-25 20:13:52 +00005454 if (Cond.getOpcode() == ISD::SETCC)
Evan Chenge5f62042007-09-29 00:00:36 +00005455 Cond = LowerSETCC(Cond, DAG);
Chris Lattnere55484e2008-12-25 05:34:37 +00005456#if 0
5457 // FIXME: LowerXALUO doesn't handle these!!
Bill Wendlingd350e022008-12-12 21:15:41 +00005458 else if (Cond.getOpcode() == X86ISD::ADD ||
5459 Cond.getOpcode() == X86ISD::SUB ||
5460 Cond.getOpcode() == X86ISD::SMUL ||
5461 Cond.getOpcode() == X86ISD::UMUL)
Bill Wendling74c37652008-12-09 22:08:41 +00005462 Cond = LowerXALUO(Cond, DAG);
Chris Lattnere55484e2008-12-25 05:34:37 +00005463#endif
Scott Michelfdc40a02009-02-17 22:15:04 +00005464
Evan Cheng3f41d662007-10-08 22:16:29 +00005465 // If condition flag is set by a X86ISD::CMP, then use it as the condition
5466 // setting operand in place of the X86ISD::SETCC.
Evan Cheng0db9fe62006-04-25 20:13:52 +00005467 if (Cond.getOpcode() == X86ISD::SETCC) {
Evan Cheng734503b2006-09-11 02:19:56 +00005468 CC = Cond.getOperand(0);
Evan Cheng0db9fe62006-04-25 20:13:52 +00005469
Dan Gohman475871a2008-07-27 21:46:04 +00005470 SDValue Cmp = Cond.getOperand(1);
Evan Cheng734503b2006-09-11 02:19:56 +00005471 unsigned Opc = Cmp.getOpcode();
Chris Lattnere55484e2008-12-25 05:34:37 +00005472 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
Dan Gohman076aee32009-03-04 19:44:21 +00005473 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
Evan Cheng3f41d662007-10-08 22:16:29 +00005474 Cond = Cmp;
Evan Cheng0488db92007-09-25 01:57:46 +00005475 addTest = false;
Bill Wendling61edeb52008-12-02 01:06:39 +00005476 } else {
Evan Cheng370e5342008-12-03 08:38:43 +00005477 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
Bill Wendling0ea25cb2008-12-03 08:32:02 +00005478 default: break;
5479 case X86::COND_O:
Dan Gohman653456c2009-01-07 00:15:08 +00005480 case X86::COND_B:
Chris Lattnere55484e2008-12-25 05:34:37 +00005481 // These can only come from an arithmetic instruction with overflow,
5482 // e.g. SADDO, UADDO.
Bill Wendling0ea25cb2008-12-03 08:32:02 +00005483 Cond = Cond.getNode()->getOperand(1);
5484 addTest = false;
5485 break;
Bill Wendling61edeb52008-12-02 01:06:39 +00005486 }
Evan Cheng0488db92007-09-25 01:57:46 +00005487 }
Evan Cheng370e5342008-12-03 08:38:43 +00005488 } else {
5489 unsigned CondOpc;
5490 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
5491 SDValue Cmp = Cond.getOperand(0).getOperand(1);
Evan Cheng370e5342008-12-03 08:38:43 +00005492 if (CondOpc == ISD::OR) {
5493 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
5494 // two branches instead of an explicit OR instruction with a
5495 // separate test.
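                                                  // For example, "br (setune x, y), dest" becomes roughly
                                                  //   ucomisd x, y
                                                  //   jne  dest            ; ordered and not equal
                                                  //   jp   dest            ; unordered (a NaN operand)
                                                  // i.e. two conditional branches off a single compare.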
5496 if (Cmp == Cond.getOperand(1).getOperand(1) &&
Dan Gohman076aee32009-03-04 19:44:21 +00005497 isX86LogicalCmp(Cmp)) {
Evan Cheng370e5342008-12-03 08:38:43 +00005498 CC = Cond.getOperand(0).getOperand(0);
Dale Johannesene4d209d2009-02-03 20:21:25 +00005499 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Evan Cheng370e5342008-12-03 08:38:43 +00005500 Chain, Dest, CC, Cmp);
5501 CC = Cond.getOperand(1).getOperand(0);
5502 Cond = Cmp;
5503 addTest = false;
5504 }
5505 } else { // ISD::AND
5506 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
5507 // two branches instead of an explicit AND instruction with a
5508 // separate test. However, we only do this if this block doesn't
5509 // have a fall-through edge, because this requires an explicit
5510 // jmp when the condition is false.
5511 if (Cmp == Cond.getOperand(1).getOperand(1) &&
Dan Gohman076aee32009-03-04 19:44:21 +00005512 isX86LogicalCmp(Cmp) &&
Evan Cheng370e5342008-12-03 08:38:43 +00005513 Op.getNode()->hasOneUse()) {
5514 X86::CondCode CCode =
5515 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
5516 CCode = X86::GetOppositeBranchCondition(CCode);
5517 CC = DAG.getConstant(CCode, MVT::i8);
5518 SDValue User = SDValue(*Op.getNode()->use_begin(), 0);
5519 // Look for an unconditional branch following this conditional branch.
5520 // We need this because we need to reverse the successors in order
5521 // to implement FCMP_OEQ.
5522 if (User.getOpcode() == ISD::BR) {
5523 SDValue FalseBB = User.getOperand(1);
5524 SDValue NewBR =
5525 DAG.UpdateNodeOperands(User, User.getOperand(0), Dest);
5526 assert(NewBR == User);
5527 Dest = FalseBB;
Dan Gohman279c22e2008-10-21 03:29:32 +00005528
Dale Johannesene4d209d2009-02-03 20:21:25 +00005529 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Evan Cheng370e5342008-12-03 08:38:43 +00005530 Chain, Dest, CC, Cmp);
5531 X86::CondCode CCode =
5532 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
5533 CCode = X86::GetOppositeBranchCondition(CCode);
5534 CC = DAG.getConstant(CCode, MVT::i8);
5535 Cond = Cmp;
5536 addTest = false;
5537 }
5538 }
Dan Gohman279c22e2008-10-21 03:29:32 +00005539 }
Evan Cheng67ad9db2009-02-02 08:07:36 +00005540 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
                                         5541    // Recognize the 'xorb (setcc), 1' pattern. The xor inverts the condition.
                                         5542    // It would normally be transformed by the dag combiner, except when the
                                         5543    // condition is set by an arithmetic-with-overflow node.
5544 X86::CondCode CCode =
5545 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
5546 CCode = X86::GetOppositeBranchCondition(CCode);
5547 CC = DAG.getConstant(CCode, MVT::i8);
5548 Cond = Cond.getOperand(0).getOperand(1);
5549 addTest = false;
Dan Gohman279c22e2008-10-21 03:29:32 +00005550 }
Evan Cheng0488db92007-09-25 01:57:46 +00005551 }
5552
5553 if (addTest) {
5554 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
Dan Gohman31125812009-03-07 01:58:32 +00005555 Cond = EmitTest(Cond, X86::COND_NE, DAG);
Evan Cheng0488db92007-09-25 01:57:46 +00005556 }
Dale Johannesene4d209d2009-02-03 20:21:25 +00005557 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Dan Gohman279c22e2008-10-21 03:29:32 +00005558 Chain, Dest, CC, Cond);
Evan Cheng0488db92007-09-25 01:57:46 +00005559}
5560
Anton Korobeynikove060b532007-04-17 19:34:00 +00005561
5562// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
                                         5563// A call to _alloca is needed to probe the stack when allocating more than 4k
5564// bytes in one go. Touching the stack at 4K increments is necessary to ensure
5565// that the guard pages used by the OS virtual memory manager are allocated in
5566// correct sequence.
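                                            // The probing done by _alloca/chkstk is conceptually
                                            //   while (size > 4096) { sp -= 4096; touch(*(char *)sp); size -= 4096; }
                                            //   sp -= size;
                                            // so every guard page is touched in order before memory beyond it is used.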
Dan Gohman475871a2008-07-27 21:46:04 +00005567SDValue
5568X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
Anton Korobeynikov4304bcc2007-07-05 20:36:08 +00005569 SelectionDAG &DAG) {
Anton Korobeynikove060b532007-04-17 19:34:00 +00005570 assert(Subtarget->isTargetCygMing() &&
5571 "This should be used only on Cygwin/Mingw targets");
Dale Johannesen6f38cb62009-02-07 19:59:05 +00005572 DebugLoc dl = Op.getDebugLoc();
Anton Korobeynikov096b4612008-06-11 20:16:42 +00005573
Anton Korobeynikov57fc00d2007-04-17 09:20:00 +00005574 // Get the inputs.
Dan Gohman475871a2008-07-27 21:46:04 +00005575 SDValue Chain = Op.getOperand(0);
5576 SDValue Size = Op.getOperand(1);
Anton Korobeynikov57fc00d2007-04-17 09:20:00 +00005577 // FIXME: Ensure alignment here
5578
Dan Gohman475871a2008-07-27 21:46:04 +00005579 SDValue Flag;
Anton Korobeynikov096b4612008-06-11 20:16:42 +00005580
Duncan Sands83ec4b62008-06-06 12:08:01 +00005581 MVT IntPtr = getPointerTy();
5582 MVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
Anton Korobeynikov57fc00d2007-04-17 09:20:00 +00005583
Chris Lattnere563bbc2008-10-11 22:08:30 +00005584 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true));
Anton Korobeynikov096b4612008-06-11 20:16:42 +00005585
Dale Johannesendd64c412009-02-04 00:33:20 +00005586 Chain = DAG.getCopyToReg(Chain, dl, X86::EAX, Size, Flag);
Anton Korobeynikov4304bcc2007-07-05 20:36:08 +00005587 Flag = Chain.getValue(1);
5588
5589 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
Dan Gohman475871a2008-07-27 21:46:04 +00005590 SDValue Ops[] = { Chain,
Bill Wendling056292f2008-09-16 21:48:12 +00005591 DAG.getTargetExternalSymbol("_alloca", IntPtr),
Anton Korobeynikov4304bcc2007-07-05 20:36:08 +00005592 DAG.getRegister(X86::EAX, IntPtr),
Anton Korobeynikov096b4612008-06-11 20:16:42 +00005593 DAG.getRegister(X86StackPtr, SPTy),
Anton Korobeynikov4304bcc2007-07-05 20:36:08 +00005594 Flag };
Dale Johannesene4d209d2009-02-03 20:21:25 +00005595 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops, 5);
Anton Korobeynikov4304bcc2007-07-05 20:36:08 +00005596 Flag = Chain.getValue(1);
5597
Anton Korobeynikov096b4612008-06-11 20:16:42 +00005598 Chain = DAG.getCALLSEQ_END(Chain,
Chris Lattnere563bbc2008-10-11 22:08:30 +00005599 DAG.getIntPtrConstant(0, true),
5600 DAG.getIntPtrConstant(0, true),
Anton Korobeynikov096b4612008-06-11 20:16:42 +00005601 Flag);
5602
Dale Johannesendd64c412009-02-04 00:33:20 +00005603 Chain = DAG.getCopyFromReg(Chain, dl, X86StackPtr, SPTy).getValue(1);
Anton Korobeynikov096b4612008-06-11 20:16:42 +00005604
Dan Gohman475871a2008-07-27 21:46:04 +00005605 SDValue Ops1[2] = { Chain.getValue(0), Chain };
Dale Johannesene4d209d2009-02-03 20:21:25 +00005606 return DAG.getMergeValues(Ops1, 2, dl);
Anton Korobeynikov57fc00d2007-04-17 09:20:00 +00005607}
5608
Dan Gohman475871a2008-07-27 21:46:04 +00005609SDValue
Dale Johannesen0f502f62009-02-03 22:26:09 +00005610X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
Bill Wendling6f287b22008-09-30 21:22:07 +00005611 SDValue Chain,
5612 SDValue Dst, SDValue Src,
5613 SDValue Size, unsigned Align,
5614 const Value *DstSV,
Bill Wendling6158d842008-10-01 00:59:58 +00005615 uint64_t DstSVOff) {
Dan Gohman707e0182008-04-12 04:36:06 +00005616 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
Evan Cheng0db9fe62006-04-25 20:13:52 +00005617
Bill Wendling6f287b22008-09-30 21:22:07 +00005618 // If not DWORD aligned or size is more than the threshold, call the library.
5619 // The libc version is likely to be faster for these cases. It can use the
5620 // address value and run time information about the CPU.
Evan Cheng1887c1c2008-08-21 21:00:15 +00005621 if ((Align & 3) != 0 ||
Dan Gohman707e0182008-04-12 04:36:06 +00005622 !ConstantSize ||
Dan Gohmanf5aeb1a2008-09-12 16:56:44 +00005623 ConstantSize->getZExtValue() >
5624 getSubtarget()->getMaxInlineSizeThreshold()) {
Dan Gohman475871a2008-07-27 21:46:04 +00005625 SDValue InFlag(0, 0);
Dan Gohman68d599d2008-04-01 20:38:36 +00005626
5627 // Check to see if there is a specialized entry-point for memory zeroing.
Dan Gohman707e0182008-04-12 04:36:06 +00005628 ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src);
Bill Wendling6f287b22008-09-30 21:22:07 +00005629
Bill Wendling6158d842008-10-01 00:59:58 +00005630 if (const char *bzeroEntry = V &&
5631 V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
5632 MVT IntPtr = getPointerTy();
5633 const Type *IntPtrTy = TD->getIntPtrType();
Scott Michelfdc40a02009-02-17 22:15:04 +00005634 TargetLowering::ArgListTy Args;
Bill Wendling6158d842008-10-01 00:59:58 +00005635 TargetLowering::ArgListEntry Entry;
5636 Entry.Node = Dst;
5637 Entry.Ty = IntPtrTy;
5638 Args.push_back(Entry);
5639 Entry.Node = Size;
5640 Args.push_back(Entry);
5641 std::pair<SDValue,SDValue> CallResult =
Scott Michelfdc40a02009-02-17 22:15:04 +00005642 LowerCallTo(Chain, Type::VoidTy, false, false, false, false,
5643 CallingConv::C, false,
Dale Johannesen0f502f62009-02-03 22:26:09 +00005644 DAG.getExternalSymbol(bzeroEntry, IntPtr), Args, DAG, dl);
Bill Wendling6158d842008-10-01 00:59:58 +00005645 return CallResult.second;
Dan Gohman68d599d2008-04-01 20:38:36 +00005646 }
5647
Dan Gohman707e0182008-04-12 04:36:06 +00005648 // Otherwise have the target-independent code call memset.
Dan Gohman475871a2008-07-27 21:46:04 +00005649 return SDValue();
Evan Cheng48090aa2006-03-21 23:01:21 +00005650 }
Evan Chengb9df0ca2006-03-22 02:53:00 +00005651
Dan Gohmanf5aeb1a2008-09-12 16:56:44 +00005652 uint64_t SizeVal = ConstantSize->getZExtValue();
Dan Gohman475871a2008-07-27 21:46:04 +00005653 SDValue InFlag(0, 0);
Duncan Sands83ec4b62008-06-06 12:08:01 +00005654 MVT AVT;
Dan Gohman475871a2008-07-27 21:46:04 +00005655 SDValue Count;
Dan Gohman707e0182008-04-12 04:36:06 +00005656 ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Src);
Evan Cheng0db9fe62006-04-25 20:13:52 +00005657 unsigned BytesLeft = 0;
5658 bool TwoRepStos = false;
5659 if (ValC) {
5660 unsigned ValReg;
Dan Gohmanf5aeb1a2008-09-12 16:56:44 +00005661 uint64_t Val = ValC->getZExtValue() & 255;
Evan Cheng5ced1d82006-04-06 23:23:56 +00005662
Evan Cheng0db9fe62006-04-25 20:13:52 +00005663 // If the value is a constant, then we can potentially use larger stores.
5664 switch (Align & 3) {
Evan Cheng1887c1c2008-08-21 21:00:15 +00005665 case 2: // WORD aligned
5666 AVT = MVT::i16;
5667 ValReg = X86::AX;
5668 Val = (Val << 8) | Val;
5669 break;
5670 case 0: // DWORD aligned
5671 AVT = MVT::i32;
5672 ValReg = X86::EAX;
5673 Val = (Val << 8) | Val;
5674 Val = (Val << 16) | Val;
5675 if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) { // QWORD aligned
5676 AVT = MVT::i64;
5677 ValReg = X86::RAX;
5678 Val = (Val << 32) | Val;
5679 }
5680 break;
5681 default: // Byte aligned
5682 AVT = MVT::i8;
5683 ValReg = X86::AL;
5684 Count = DAG.getIntPtrConstant(SizeVal);
5685 break;
Evan Cheng80d428c2006-04-19 22:48:17 +00005686 }
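    // Illustrative example (not part of the lowering itself): for Src == 0xAB
    // with a DWORD-aligned destination, the splat above produces
    // Val == 0xABABABAB in EAX, and 0xABABABABABABABAB in RAX when the buffer
    // is also QWORD aligned on x86-64, so each REP_STOS iteration stores
    // 4 (resp. 8) bytes.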
5687
Duncan Sands8e4eb092008-06-08 20:54:56 +00005688 if (AVT.bitsGT(MVT::i8)) {
Duncan Sands83ec4b62008-06-06 12:08:01 +00005689 unsigned UBytes = AVT.getSizeInBits() / 8;
Dan Gohman707e0182008-04-12 04:36:06 +00005690 Count = DAG.getIntPtrConstant(SizeVal / UBytes);
5691 BytesLeft = SizeVal % UBytes;
Evan Cheng25ab6902006-09-08 06:48:29 +00005692 }
5693
Dale Johannesen0f502f62009-02-03 22:26:09 +00005694 Chain = DAG.getCopyToReg(Chain, dl, ValReg, DAG.getConstant(Val, AVT),
Evan Cheng0db9fe62006-04-25 20:13:52 +00005695 InFlag);
5696 InFlag = Chain.getValue(1);
5697 } else {
5698 AVT = MVT::i8;
Dan Gohmanbcda2852008-04-16 01:32:32 +00005699 Count = DAG.getIntPtrConstant(SizeVal);
Dale Johannesen0f502f62009-02-03 22:26:09 +00005700 Chain = DAG.getCopyToReg(Chain, dl, X86::AL, Src, InFlag);
Evan Cheng0db9fe62006-04-25 20:13:52 +00005701 InFlag = Chain.getValue(1);
Evan Chengb9df0ca2006-03-22 02:53:00 +00005702 }
Evan Chengc78d3b42006-04-24 18:01:45 +00005703
Scott Michelfdc40a02009-02-17 22:15:04 +00005704 Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RCX :
Dale Johannesen0f502f62009-02-03 22:26:09 +00005705 X86::ECX,
Evan Cheng25ab6902006-09-08 06:48:29 +00005706 Count, InFlag);
Evan Cheng0db9fe62006-04-25 20:13:52 +00005707 InFlag = Chain.getValue(1);
Scott Michelfdc40a02009-02-17 22:15:04 +00005708 Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RDI :
Dale Johannesen0f502f62009-02-03 22:26:09 +00005709 X86::EDI,
Dan Gohman707e0182008-04-12 04:36:06 +00005710 Dst, InFlag);
Evan Cheng0db9fe62006-04-25 20:13:52 +00005711 InFlag = Chain.getValue(1);
Evan Chenga0b3afb2006-03-27 07:00:16 +00005712
Chris Lattnerd96d0722007-02-25 06:40:16 +00005713 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
Dan Gohman475871a2008-07-27 21:46:04 +00005714 SmallVector<SDValue, 8> Ops;
Evan Cheng0db9fe62006-04-25 20:13:52 +00005715 Ops.push_back(Chain);
5716 Ops.push_back(DAG.getValueType(AVT));
5717 Ops.push_back(InFlag);
Dale Johannesen0f502f62009-02-03 22:26:09 +00005718 Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, &Ops[0], Ops.size());
Evan Chengc78d3b42006-04-24 18:01:45 +00005719
Evan Cheng0db9fe62006-04-25 20:13:52 +00005720 if (TwoRepStos) {
5721 InFlag = Chain.getValue(1);
Dan Gohman707e0182008-04-12 04:36:06 +00005722 Count = Size;
Duncan Sands83ec4b62008-06-06 12:08:01 +00005723 MVT CVT = Count.getValueType();
Dale Johannesen0f502f62009-02-03 22:26:09 +00005724 SDValue Left = DAG.getNode(ISD::AND, dl, CVT, Count,
Evan Cheng25ab6902006-09-08 06:48:29 +00005725 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
Scott Michelfdc40a02009-02-17 22:15:04 +00005726 Chain = DAG.getCopyToReg(Chain, dl, (CVT == MVT::i64) ? X86::RCX :
Dale Johannesen0f502f62009-02-03 22:26:09 +00005727 X86::ECX,
Evan Cheng25ab6902006-09-08 06:48:29 +00005728 Left, InFlag);
Evan Cheng0db9fe62006-04-25 20:13:52 +00005729 InFlag = Chain.getValue(1);
Chris Lattnerd96d0722007-02-25 06:40:16 +00005730 Tys = DAG.getVTList(MVT::Other, MVT::Flag);
Evan Cheng0db9fe62006-04-25 20:13:52 +00005731 Ops.clear();
5732 Ops.push_back(Chain);
5733 Ops.push_back(DAG.getValueType(MVT::i8));
5734 Ops.push_back(InFlag);
Dale Johannesen0f502f62009-02-03 22:26:09 +00005735 Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, &Ops[0], Ops.size());
Evan Cheng0db9fe62006-04-25 20:13:52 +00005736 } else if (BytesLeft) {
Dan Gohman707e0182008-04-12 04:36:06 +00005737 // Handle the last 1 - 7 bytes.
5738 unsigned Offset = SizeVal - BytesLeft;
Duncan Sands83ec4b62008-06-06 12:08:01 +00005739 MVT AddrVT = Dst.getValueType();
5740 MVT SizeVT = Size.getValueType();
Dan Gohman707e0182008-04-12 04:36:06 +00005741
Dale Johannesen0f502f62009-02-03 22:26:09 +00005742 Chain = DAG.getMemset(Chain, dl,
5743 DAG.getNode(ISD::ADD, dl, AddrVT, Dst,
Dan Gohman707e0182008-04-12 04:36:06 +00005744 DAG.getConstant(Offset, AddrVT)),
5745 Src,
5746 DAG.getConstant(BytesLeft, SizeVT),
Dan Gohman1f13c682008-04-28 17:15:20 +00005747 Align, DstSV, DstSVOff + Offset);
Evan Cheng386031a2006-03-24 07:29:27 +00005748 }
Evan Cheng11e15b32006-04-03 20:53:28 +00005749
Dan Gohman707e0182008-04-12 04:36:06 +00005750 // TODO: Use a TokenFactor, as in memcpy, instead of a single chain.
Evan Cheng0db9fe62006-04-25 20:13:52 +00005751 return Chain;
5752}
Evan Cheng11e15b32006-04-03 20:53:28 +00005753
Dan Gohman475871a2008-07-27 21:46:04 +00005754SDValue
Dale Johannesen0f502f62009-02-03 22:26:09 +00005755X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
Evan Cheng1887c1c2008-08-21 21:00:15 +00005756 SDValue Chain, SDValue Dst, SDValue Src,
5757 SDValue Size, unsigned Align,
5758 bool AlwaysInline,
5759 const Value *DstSV, uint64_t DstSVOff,
Scott Michelfdc40a02009-02-17 22:15:04 +00005760 const Value *SrcSV, uint64_t SrcSVOff) {
Dan Gohman707e0182008-04-12 04:36:06 +00005761 // This requires the copy size to be a constant, preferably
5762 // within a subtarget-specific limit.
5763 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
5764 if (!ConstantSize)
Dan Gohman475871a2008-07-27 21:46:04 +00005765 return SDValue();
Dan Gohmanf5aeb1a2008-09-12 16:56:44 +00005766 uint64_t SizeVal = ConstantSize->getZExtValue();
Dan Gohman707e0182008-04-12 04:36:06 +00005767 if (!AlwaysInline && SizeVal > getSubtarget()->getMaxInlineSizeThreshold())
Dan Gohman475871a2008-07-27 21:46:04 +00005768 return SDValue();
Dan Gohman707e0182008-04-12 04:36:06 +00005769
Evan Cheng1887c1c2008-08-21 21:00:15 +00005770 // If not DWORD aligned, call the library.
5771 if ((Align & 3) != 0)
5772 return SDValue();
5773
5774 // DWORD aligned
5775 MVT AVT = MVT::i32;
5776 if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) // QWORD aligned
Dan Gohman707e0182008-04-12 04:36:06 +00005777 AVT = MVT::i64;
Evan Cheng0db9fe62006-04-25 20:13:52 +00005778
Duncan Sands83ec4b62008-06-06 12:08:01 +00005779 unsigned UBytes = AVT.getSizeInBits() / 8;
Dan Gohman707e0182008-04-12 04:36:06 +00005780 unsigned CountVal = SizeVal / UBytes;
Dan Gohman475871a2008-07-27 21:46:04 +00005781 SDValue Count = DAG.getIntPtrConstant(CountVal);
Evan Cheng1887c1c2008-08-21 21:00:15 +00005782 unsigned BytesLeft = SizeVal % UBytes;
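  // For illustration: a constant 13-byte, DWORD-aligned copy on x86-32 gives
  // UBytes == 4, CountVal == 3 (three REP_MOVS iterations copying 12 bytes) and
  // BytesLeft == 1, which is finished off by the trailing getMemcpy below.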
Evan Cheng25ab6902006-09-08 06:48:29 +00005783
Dan Gohman475871a2008-07-27 21:46:04 +00005784 SDValue InFlag(0, 0);
Scott Michelfdc40a02009-02-17 22:15:04 +00005785 Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RCX :
Dale Johannesen0f502f62009-02-03 22:26:09 +00005786 X86::ECX,
Evan Cheng25ab6902006-09-08 06:48:29 +00005787 Count, InFlag);
Evan Cheng0db9fe62006-04-25 20:13:52 +00005788 InFlag = Chain.getValue(1);
Scott Michelfdc40a02009-02-17 22:15:04 +00005789 Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RDI :
Dale Johannesen0f502f62009-02-03 22:26:09 +00005790 X86::EDI,
Dan Gohman707e0182008-04-12 04:36:06 +00005791 Dst, InFlag);
Evan Cheng0db9fe62006-04-25 20:13:52 +00005792 InFlag = Chain.getValue(1);
Scott Michelfdc40a02009-02-17 22:15:04 +00005793 Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RSI :
Dale Johannesen0f502f62009-02-03 22:26:09 +00005794 X86::ESI,
Dan Gohman707e0182008-04-12 04:36:06 +00005795 Src, InFlag);
Evan Cheng0db9fe62006-04-25 20:13:52 +00005796 InFlag = Chain.getValue(1);
5797
Chris Lattnerd96d0722007-02-25 06:40:16 +00005798 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
Dan Gohman475871a2008-07-27 21:46:04 +00005799 SmallVector<SDValue, 8> Ops;
Evan Cheng0db9fe62006-04-25 20:13:52 +00005800 Ops.push_back(Chain);
5801 Ops.push_back(DAG.getValueType(AVT));
5802 Ops.push_back(InFlag);
Dale Johannesen0f502f62009-02-03 22:26:09 +00005803 SDValue RepMovs = DAG.getNode(X86ISD::REP_MOVS, dl, Tys, &Ops[0], Ops.size());
Evan Cheng0db9fe62006-04-25 20:13:52 +00005804
Dan Gohman475871a2008-07-27 21:46:04 +00005805 SmallVector<SDValue, 4> Results;
Evan Cheng2749c722008-04-25 00:26:43 +00005806 Results.push_back(RepMovs);
Rafael Espindola068317b2007-09-28 12:53:01 +00005807 if (BytesLeft) {
Dan Gohman707e0182008-04-12 04:36:06 +00005808 // Handle the last 1 - 7 bytes.
5809 unsigned Offset = SizeVal - BytesLeft;
Duncan Sands83ec4b62008-06-06 12:08:01 +00005810 MVT DstVT = Dst.getValueType();
5811 MVT SrcVT = Src.getValueType();
5812 MVT SizeVT = Size.getValueType();
Scott Michelfdc40a02009-02-17 22:15:04 +00005813 Results.push_back(DAG.getMemcpy(Chain, dl,
Dale Johannesen0f502f62009-02-03 22:26:09 +00005814 DAG.getNode(ISD::ADD, dl, DstVT, Dst,
Evan Cheng2749c722008-04-25 00:26:43 +00005815 DAG.getConstant(Offset, DstVT)),
Dale Johannesen0f502f62009-02-03 22:26:09 +00005816 DAG.getNode(ISD::ADD, dl, SrcVT, Src,
Evan Cheng2749c722008-04-25 00:26:43 +00005817 DAG.getConstant(Offset, SrcVT)),
Dan Gohman707e0182008-04-12 04:36:06 +00005818 DAG.getConstant(BytesLeft, SizeVT),
5819 Align, AlwaysInline,
Dan Gohman1f13c682008-04-28 17:15:20 +00005820 DstSV, DstSVOff + Offset,
5821 SrcSV, SrcSVOff + Offset));
Evan Chengb067a1e2006-03-31 19:22:53 +00005822 }
Evan Cheng0db9fe62006-04-25 20:13:52 +00005823
Scott Michelfdc40a02009-02-17 22:15:04 +00005824 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
Dale Johannesen0f502f62009-02-03 22:26:09 +00005825 &Results[0], Results.size());
Evan Cheng0db9fe62006-04-25 20:13:52 +00005826}
5827
Dan Gohman475871a2008-07-27 21:46:04 +00005828SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) {
Dan Gohman69de1932008-02-06 22:27:42 +00005829 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
Dale Johannesen6f38cb62009-02-07 19:59:05 +00005830 DebugLoc dl = Op.getDebugLoc();
Evan Cheng8b2794a2006-10-13 21:14:26 +00005831
Evan Cheng25ab6902006-09-08 06:48:29 +00005832 if (!Subtarget->is64Bit()) {
5833 // vastart just stores the address of the VarArgsFrameIndex slot into the
5834 // memory location argument.
Dan Gohman475871a2008-07-27 21:46:04 +00005835 SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
Dale Johannesene4d209d2009-02-03 20:21:25 +00005836 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0);
Evan Cheng25ab6902006-09-08 06:48:29 +00005837 }
5838
5839 // __va_list_tag:
5840 // gp_offset (0 - 6 * 8)
5841 // fp_offset (48 - 48 + 8 * 16)
5842 // overflow_arg_area (points to parameters passed in memory).
5843 // reg_save_area
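  //
  // Roughly, in C terms (per the System V x86-64 psABI) the layout written
  // below is:
  //   struct __va_list_tag {
  //     unsigned gp_offset;        // at offset 0
  //     unsigned fp_offset;        // at offset 4
  //     void *overflow_arg_area;   // at offset 8
  //     void *reg_save_area;       // at offset 16
  //   };
  // which matches the +4/+4/+8 pointer increments used for the stores below.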
Dan Gohman475871a2008-07-27 21:46:04 +00005844 SmallVector<SDValue, 8> MemOps;
5845 SDValue FIN = Op.getOperand(1);
Evan Cheng25ab6902006-09-08 06:48:29 +00005846 // Store gp_offset
Dale Johannesene4d209d2009-02-03 20:21:25 +00005847 SDValue Store = DAG.getStore(Op.getOperand(0), dl,
Evan Cheng786225a2006-10-05 23:01:46 +00005848 DAG.getConstant(VarArgsGPOffset, MVT::i32),
Dan Gohman69de1932008-02-06 22:27:42 +00005849 FIN, SV, 0);
Evan Cheng25ab6902006-09-08 06:48:29 +00005850 MemOps.push_back(Store);
5851
5852 // Store fp_offset
Scott Michelfdc40a02009-02-17 22:15:04 +00005853 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(),
Dale Johannesene4d209d2009-02-03 20:21:25 +00005854 FIN, DAG.getIntPtrConstant(4));
5855 Store = DAG.getStore(Op.getOperand(0), dl,
Evan Cheng786225a2006-10-05 23:01:46 +00005856 DAG.getConstant(VarArgsFPOffset, MVT::i32),
Dan Gohman69de1932008-02-06 22:27:42 +00005857 FIN, SV, 0);
Evan Cheng25ab6902006-09-08 06:48:29 +00005858 MemOps.push_back(Store);
5859
5860 // Store ptr to overflow_arg_area
Scott Michelfdc40a02009-02-17 22:15:04 +00005861 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(),
Dale Johannesene4d209d2009-02-03 20:21:25 +00005862 FIN, DAG.getIntPtrConstant(4));
Dan Gohman475871a2008-07-27 21:46:04 +00005863 SDValue OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
Dale Johannesene4d209d2009-02-03 20:21:25 +00005864 Store = DAG.getStore(Op.getOperand(0), dl, OVFIN, FIN, SV, 0);
Evan Cheng25ab6902006-09-08 06:48:29 +00005865 MemOps.push_back(Store);
5866
5867 // Store ptr to reg_save_area.
Scott Michelfdc40a02009-02-17 22:15:04 +00005868 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(),
Dale Johannesene4d209d2009-02-03 20:21:25 +00005869 FIN, DAG.getIntPtrConstant(8));
Dan Gohman475871a2008-07-27 21:46:04 +00005870 SDValue RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
Dale Johannesene4d209d2009-02-03 20:21:25 +00005871 Store = DAG.getStore(Op.getOperand(0), dl, RSFIN, FIN, SV, 0);
Evan Cheng25ab6902006-09-08 06:48:29 +00005872 MemOps.push_back(Store);
Scott Michelfdc40a02009-02-17 22:15:04 +00005873 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
Dale Johannesene4d209d2009-02-03 20:21:25 +00005874 &MemOps[0], MemOps.size());
Evan Cheng0db9fe62006-04-25 20:13:52 +00005875}
5876
Dan Gohman475871a2008-07-27 21:46:04 +00005877SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) {
Dan Gohman9018e832008-05-10 01:26:14 +00005878 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
5879 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_arg!");
Dan Gohman475871a2008-07-27 21:46:04 +00005880 SDValue Chain = Op.getOperand(0);
5881 SDValue SrcPtr = Op.getOperand(1);
5882 SDValue SrcSV = Op.getOperand(2);
Dan Gohman9018e832008-05-10 01:26:14 +00005883
5884 assert(0 && "VAArgInst is not yet implemented for x86-64!");
5885 abort();
Dan Gohman475871a2008-07-27 21:46:04 +00005886 return SDValue();
Dan Gohman9018e832008-05-10 01:26:14 +00005887}
5888
Dan Gohman475871a2008-07-27 21:46:04 +00005889SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) {
Evan Chengae642192007-03-02 23:16:35 +00005890 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
Dan Gohman28269132008-04-18 20:55:41 +00005891 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
Dan Gohman475871a2008-07-27 21:46:04 +00005892 SDValue Chain = Op.getOperand(0);
5893 SDValue DstPtr = Op.getOperand(1);
5894 SDValue SrcPtr = Op.getOperand(2);
Dan Gohman69de1932008-02-06 22:27:42 +00005895 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
5896 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
Dale Johannesen6f38cb62009-02-07 19:59:05 +00005897 DebugLoc dl = Op.getDebugLoc();
Evan Chengae642192007-03-02 23:16:35 +00005898
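  // Descriptive note: the 24 bytes copied below are the whole x86-64 va_list,
  // i.e. two 4-byte offsets plus two 8-byte pointers, copied with 8-byte
  // alignment.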
Dale Johannesendd64c412009-02-04 00:33:20 +00005899 return DAG.getMemcpy(Chain, dl, DstPtr, SrcPtr,
Dan Gohman28269132008-04-18 20:55:41 +00005900 DAG.getIntPtrConstant(24), 8, false,
5901 DstSV, 0, SrcSV, 0);
Evan Chengae642192007-03-02 23:16:35 +00005902}
5903
Dan Gohman475871a2008-07-27 21:46:04 +00005904SDValue
5905X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
Dale Johannesen6f38cb62009-02-07 19:59:05 +00005906 DebugLoc dl = Op.getDebugLoc();
Dan Gohmanf5aeb1a2008-09-12 16:56:44 +00005907 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
Evan Cheng0db9fe62006-04-25 20:13:52 +00005908 switch (IntNo) {
Dan Gohman475871a2008-07-27 21:46:04 +00005909 default: return SDValue(); // Don't custom lower most intrinsics.
Evan Cheng5759f972008-05-04 09:15:50 +00005910 // Comparison intrinsics.
Evan Cheng0db9fe62006-04-25 20:13:52 +00005911 case Intrinsic::x86_sse_comieq_ss:
5912 case Intrinsic::x86_sse_comilt_ss:
5913 case Intrinsic::x86_sse_comile_ss:
5914 case Intrinsic::x86_sse_comigt_ss:
5915 case Intrinsic::x86_sse_comige_ss:
5916 case Intrinsic::x86_sse_comineq_ss:
5917 case Intrinsic::x86_sse_ucomieq_ss:
5918 case Intrinsic::x86_sse_ucomilt_ss:
5919 case Intrinsic::x86_sse_ucomile_ss:
5920 case Intrinsic::x86_sse_ucomigt_ss:
5921 case Intrinsic::x86_sse_ucomige_ss:
5922 case Intrinsic::x86_sse_ucomineq_ss:
5923 case Intrinsic::x86_sse2_comieq_sd:
5924 case Intrinsic::x86_sse2_comilt_sd:
5925 case Intrinsic::x86_sse2_comile_sd:
5926 case Intrinsic::x86_sse2_comigt_sd:
5927 case Intrinsic::x86_sse2_comige_sd:
5928 case Intrinsic::x86_sse2_comineq_sd:
5929 case Intrinsic::x86_sse2_ucomieq_sd:
5930 case Intrinsic::x86_sse2_ucomilt_sd:
5931 case Intrinsic::x86_sse2_ucomile_sd:
5932 case Intrinsic::x86_sse2_ucomigt_sd:
5933 case Intrinsic::x86_sse2_ucomige_sd:
5934 case Intrinsic::x86_sse2_ucomineq_sd: {
5935 unsigned Opc = 0;
5936 ISD::CondCode CC = ISD::SETCC_INVALID;
5937 switch (IntNo) {
5938 default: break;
Anton Korobeynikov12c49af2006-11-21 00:01:06 +00005939 case Intrinsic::x86_sse_comieq_ss:
5940 case Intrinsic::x86_sse2_comieq_sd:
Evan Cheng0db9fe62006-04-25 20:13:52 +00005941 Opc = X86ISD::COMI;
5942 CC = ISD::SETEQ;
5943 break;
Evan Cheng6be2c582006-04-05 23:38:46 +00005944 case Intrinsic::x86_sse_comilt_ss:
Evan Cheng6be2c582006-04-05 23:38:46 +00005945 case Intrinsic::x86_sse2_comilt_sd:
Evan Cheng0db9fe62006-04-25 20:13:52 +00005946 Opc = X86ISD::COMI;
5947 CC = ISD::SETLT;
5948 break;
5949 case Intrinsic::x86_sse_comile_ss:
Evan Cheng6be2c582006-04-05 23:38:46 +00005950 case Intrinsic::x86_sse2_comile_sd:
Evan Cheng0db9fe62006-04-25 20:13:52 +00005951 Opc = X86ISD::COMI;
5952 CC = ISD::SETLE;
5953 break;
5954 case Intrinsic::x86_sse_comigt_ss:
Evan Cheng6be2c582006-04-05 23:38:46 +00005955 case Intrinsic::x86_sse2_comigt_sd:
Evan Cheng0db9fe62006-04-25 20:13:52 +00005956 Opc = X86ISD::COMI;
5957 CC = ISD::SETGT;
5958 break;
5959 case Intrinsic::x86_sse_comige_ss:
Evan Cheng6be2c582006-04-05 23:38:46 +00005960 case Intrinsic::x86_sse2_comige_sd:
Evan Cheng0db9fe62006-04-25 20:13:52 +00005961 Opc = X86ISD::COMI;
5962 CC = ISD::SETGE;
5963 break;
5964 case Intrinsic::x86_sse_comineq_ss:
Evan Cheng6be2c582006-04-05 23:38:46 +00005965 case Intrinsic::x86_sse2_comineq_sd:
Evan Cheng0db9fe62006-04-25 20:13:52 +00005966 Opc = X86ISD::COMI;
5967 CC = ISD::SETNE;
5968 break;
5969 case Intrinsic::x86_sse_ucomieq_ss:
Evan Cheng6be2c582006-04-05 23:38:46 +00005970 case Intrinsic::x86_sse2_ucomieq_sd:
Evan Cheng0db9fe62006-04-25 20:13:52 +00005971 Opc = X86ISD::UCOMI;
5972 CC = ISD::SETEQ;
5973 break;
5974 case Intrinsic::x86_sse_ucomilt_ss:
Evan Cheng6be2c582006-04-05 23:38:46 +00005975 case Intrinsic::x86_sse2_ucomilt_sd:
Evan Cheng0db9fe62006-04-25 20:13:52 +00005976 Opc = X86ISD::UCOMI;
5977 CC = ISD::SETLT;
5978 break;
5979 case Intrinsic::x86_sse_ucomile_ss:
Evan Cheng6be2c582006-04-05 23:38:46 +00005980 case Intrinsic::x86_sse2_ucomile_sd:
Evan Cheng0db9fe62006-04-25 20:13:52 +00005981 Opc = X86ISD::UCOMI;
5982 CC = ISD::SETLE;
5983 break;
5984 case Intrinsic::x86_sse_ucomigt_ss:
Evan Cheng6be2c582006-04-05 23:38:46 +00005985 case Intrinsic::x86_sse2_ucomigt_sd:
Evan Cheng0db9fe62006-04-25 20:13:52 +00005986 Opc = X86ISD::UCOMI;
5987 CC = ISD::SETGT;
5988 break;
5989 case Intrinsic::x86_sse_ucomige_ss:
Evan Cheng6be2c582006-04-05 23:38:46 +00005990 case Intrinsic::x86_sse2_ucomige_sd:
Evan Cheng0db9fe62006-04-25 20:13:52 +00005991 Opc = X86ISD::UCOMI;
5992 CC = ISD::SETGE;
5993 break;
5994 case Intrinsic::x86_sse_ucomineq_ss:
5995 case Intrinsic::x86_sse2_ucomineq_sd:
5996 Opc = X86ISD::UCOMI;
5997 CC = ISD::SETNE;
5998 break;
Evan Cheng6be2c582006-04-05 23:38:46 +00005999 }
Evan Cheng734503b2006-09-11 02:19:56 +00006000
Dan Gohman475871a2008-07-27 21:46:04 +00006001 SDValue LHS = Op.getOperand(1);
6002 SDValue RHS = Op.getOperand(2);
Chris Lattner1c39d4c2008-12-24 23:53:05 +00006003 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
Dale Johannesene4d209d2009-02-03 20:21:25 +00006004 SDValue Cond = DAG.getNode(Opc, dl, MVT::i32, LHS, RHS);
6005 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
Evan Cheng0ac3fc22008-08-17 19:22:34 +00006006 DAG.getConstant(X86CC, MVT::i8), Cond);
Dale Johannesene4d209d2009-02-03 20:21:25 +00006007 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
Evan Cheng6be2c582006-04-05 23:38:46 +00006008 }
Evan Cheng5759f972008-05-04 09:15:50 +00006009
6010 // Fix vector shift instructions where the last operand is a non-immediate
6011 // i32 value.
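  // Illustrative example: a call such as
  //   llvm.x86.sse2.pslli.d(<4 x i32> %v, i32 %n)
  // with a run-time %n cannot use the immediate-form instruction, so it is
  // rewritten below to llvm.x86.sse2.psll.d with %n placed in the low element
  // of a v4i32 (v2i32 for the MMX variants) shift-amount operand.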
6012 case Intrinsic::x86_sse2_pslli_w:
6013 case Intrinsic::x86_sse2_pslli_d:
6014 case Intrinsic::x86_sse2_pslli_q:
6015 case Intrinsic::x86_sse2_psrli_w:
6016 case Intrinsic::x86_sse2_psrli_d:
6017 case Intrinsic::x86_sse2_psrli_q:
6018 case Intrinsic::x86_sse2_psrai_w:
6019 case Intrinsic::x86_sse2_psrai_d:
6020 case Intrinsic::x86_mmx_pslli_w:
6021 case Intrinsic::x86_mmx_pslli_d:
6022 case Intrinsic::x86_mmx_pslli_q:
6023 case Intrinsic::x86_mmx_psrli_w:
6024 case Intrinsic::x86_mmx_psrli_d:
6025 case Intrinsic::x86_mmx_psrli_q:
6026 case Intrinsic::x86_mmx_psrai_w:
6027 case Intrinsic::x86_mmx_psrai_d: {
Dan Gohman475871a2008-07-27 21:46:04 +00006028 SDValue ShAmt = Op.getOperand(2);
Evan Cheng5759f972008-05-04 09:15:50 +00006029 if (isa<ConstantSDNode>(ShAmt))
Dan Gohman475871a2008-07-27 21:46:04 +00006030 return SDValue();
Evan Cheng5759f972008-05-04 09:15:50 +00006031
6032 unsigned NewIntNo = 0;
Duncan Sands83ec4b62008-06-06 12:08:01 +00006033 MVT ShAmtVT = MVT::v4i32;
Evan Cheng5759f972008-05-04 09:15:50 +00006034 switch (IntNo) {
6035 case Intrinsic::x86_sse2_pslli_w:
6036 NewIntNo = Intrinsic::x86_sse2_psll_w;
6037 break;
6038 case Intrinsic::x86_sse2_pslli_d:
6039 NewIntNo = Intrinsic::x86_sse2_psll_d;
6040 break;
6041 case Intrinsic::x86_sse2_pslli_q:
6042 NewIntNo = Intrinsic::x86_sse2_psll_q;
6043 break;
6044 case Intrinsic::x86_sse2_psrli_w:
6045 NewIntNo = Intrinsic::x86_sse2_psrl_w;
6046 break;
6047 case Intrinsic::x86_sse2_psrli_d:
6048 NewIntNo = Intrinsic::x86_sse2_psrl_d;
6049 break;
6050 case Intrinsic::x86_sse2_psrli_q:
6051 NewIntNo = Intrinsic::x86_sse2_psrl_q;
6052 break;
6053 case Intrinsic::x86_sse2_psrai_w:
6054 NewIntNo = Intrinsic::x86_sse2_psra_w;
6055 break;
6056 case Intrinsic::x86_sse2_psrai_d:
6057 NewIntNo = Intrinsic::x86_sse2_psra_d;
6058 break;
6059 default: {
6060 ShAmtVT = MVT::v2i32;
6061 switch (IntNo) {
6062 case Intrinsic::x86_mmx_pslli_w:
6063 NewIntNo = Intrinsic::x86_mmx_psll_w;
6064 break;
6065 case Intrinsic::x86_mmx_pslli_d:
6066 NewIntNo = Intrinsic::x86_mmx_psll_d;
6067 break;
6068 case Intrinsic::x86_mmx_pslli_q:
6069 NewIntNo = Intrinsic::x86_mmx_psll_q;
6070 break;
6071 case Intrinsic::x86_mmx_psrli_w:
6072 NewIntNo = Intrinsic::x86_mmx_psrl_w;
6073 break;
6074 case Intrinsic::x86_mmx_psrli_d:
6075 NewIntNo = Intrinsic::x86_mmx_psrl_d;
6076 break;
6077 case Intrinsic::x86_mmx_psrli_q:
6078 NewIntNo = Intrinsic::x86_mmx_psrl_q;
6079 break;
6080 case Intrinsic::x86_mmx_psrai_w:
6081 NewIntNo = Intrinsic::x86_mmx_psra_w;
6082 break;
6083 case Intrinsic::x86_mmx_psrai_d:
6084 NewIntNo = Intrinsic::x86_mmx_psra_d;
6085 break;
6086 default: abort(); // Can't reach here.
6087 }
6088 break;
6089 }
6090 }
Duncan Sands83ec4b62008-06-06 12:08:01 +00006091 MVT VT = Op.getValueType();
Dale Johannesene4d209d2009-02-03 20:21:25 +00006092 ShAmt = DAG.getNode(ISD::BIT_CONVERT, dl, VT,
6093 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShAmtVT, ShAmt));
6094 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
Evan Cheng5759f972008-05-04 09:15:50 +00006095 DAG.getConstant(NewIntNo, MVT::i32),
6096 Op.getOperand(1), ShAmt);
6097 }
Evan Cheng38bcbaf2005-12-23 07:31:11 +00006098 }
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00006099}
Evan Cheng72261582005-12-20 06:22:03 +00006100
Dan Gohman475871a2008-07-27 21:46:04 +00006101SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) {
Bill Wendling64e87322009-01-16 19:25:27 +00006102 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
Dale Johannesen6f38cb62009-02-07 19:59:05 +00006103 DebugLoc dl = Op.getDebugLoc();
Bill Wendling64e87322009-01-16 19:25:27 +00006104
6105 if (Depth > 0) {
6106 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
6107 SDValue Offset =
6108 DAG.getConstant(TD->getPointerSize(),
6109 Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
Dale Johannesene4d209d2009-02-03 20:21:25 +00006110 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
Scott Michelfdc40a02009-02-17 22:15:04 +00006111 DAG.getNode(ISD::ADD, dl, getPointerTy(),
Dale Johannesene4d209d2009-02-03 20:21:25 +00006112 FrameAddr, Offset),
Bill Wendling64e87322009-01-16 19:25:27 +00006113 NULL, 0);
6114 }
6115
6116 // Just load the return address.
Dan Gohman475871a2008-07-27 21:46:04 +00006117 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
Scott Michelfdc40a02009-02-17 22:15:04 +00006118 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
Dale Johannesene4d209d2009-02-03 20:21:25 +00006119 RetAddrFI, NULL, 0);
Nate Begemanbcc5f362007-01-29 22:58:52 +00006120}
6121
Dan Gohman475871a2008-07-27 21:46:04 +00006122SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
Evan Cheng184793f2008-09-27 01:56:22 +00006123 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
6124 MFI->setFrameAddressIsTaken(true);
6125 MVT VT = Op.getValueType();
Dale Johannesen6f38cb62009-02-07 19:59:05 +00006126 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful
Evan Cheng184793f2008-09-27 01:56:22 +00006127 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
6128 unsigned FrameReg = Subtarget->is64Bit() ? X86::RBP : X86::EBP;
Dale Johannesendd64c412009-02-04 00:33:20 +00006129 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
Evan Cheng184793f2008-09-27 01:56:22 +00006130 while (Depth--)
Dale Johannesendd64c412009-02-04 00:33:20 +00006131 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, NULL, 0);
Evan Cheng184793f2008-09-27 01:56:22 +00006132 return FrameAddr;
Nate Begemanbcc5f362007-01-29 22:58:52 +00006133}
6134
Dan Gohman475871a2008-07-27 21:46:04 +00006135SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
Anton Korobeynikov260a6b82008-09-08 21:12:11 +00006136 SelectionDAG &DAG) {
Anton Korobeynikovbff66b02008-09-09 18:22:57 +00006137 return DAG.getIntPtrConstant(2*TD->getPointerSize());
Anton Korobeynikov2365f512007-07-14 14:06:15 +00006138}
6139
Dan Gohman475871a2008-07-27 21:46:04 +00006140SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
Anton Korobeynikov2365f512007-07-14 14:06:15 +00006141{
Anton Korobeynikov2365f512007-07-14 14:06:15 +00006142 MachineFunction &MF = DAG.getMachineFunction();
Dan Gohman475871a2008-07-27 21:46:04 +00006143 SDValue Chain = Op.getOperand(0);
6144 SDValue Offset = Op.getOperand(1);
6145 SDValue Handler = Op.getOperand(2);
Dale Johannesen6f38cb62009-02-07 19:59:05 +00006146 DebugLoc dl = Op.getDebugLoc();
Anton Korobeynikov2365f512007-07-14 14:06:15 +00006147
Anton Korobeynikovb84c1672008-09-08 21:12:47 +00006148 SDValue Frame = DAG.getRegister(Subtarget->is64Bit() ? X86::RBP : X86::EBP,
6149 getPointerTy());
6150 unsigned StoreAddrReg = (Subtarget->is64Bit() ? X86::RCX : X86::ECX);
Anton Korobeynikov2365f512007-07-14 14:06:15 +00006151
Dale Johannesene4d209d2009-02-03 20:21:25 +00006152 SDValue StoreAddr = DAG.getNode(ISD::SUB, dl, getPointerTy(), Frame,
Anton Korobeynikovbff66b02008-09-09 18:22:57 +00006153 DAG.getIntPtrConstant(-TD->getPointerSize()));
Dale Johannesene4d209d2009-02-03 20:21:25 +00006154 StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset);
6155 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, NULL, 0);
Dale Johannesendd64c412009-02-04 00:33:20 +00006156 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
Anton Korobeynikovb84c1672008-09-08 21:12:47 +00006157 MF.getRegInfo().addLiveOut(StoreAddrReg);
Anton Korobeynikov2365f512007-07-14 14:06:15 +00006158
Dale Johannesene4d209d2009-02-03 20:21:25 +00006159 return DAG.getNode(X86ISD::EH_RETURN, dl,
Anton Korobeynikovb84c1672008-09-08 21:12:47 +00006160 MVT::Other,
6161 Chain, DAG.getRegister(StoreAddrReg, getPointerTy()));
Anton Korobeynikov2365f512007-07-14 14:06:15 +00006162}
6163
Dan Gohman475871a2008-07-27 21:46:04 +00006164SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op,
Duncan Sandsb116fac2007-07-27 20:02:49 +00006165 SelectionDAG &DAG) {
Dan Gohman475871a2008-07-27 21:46:04 +00006166 SDValue Root = Op.getOperand(0);
6167 SDValue Trmp = Op.getOperand(1); // trampoline
6168 SDValue FPtr = Op.getOperand(2); // nested function
6169 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
Dale Johannesen6f38cb62009-02-07 19:59:05 +00006170 DebugLoc dl = Op.getDebugLoc();
Duncan Sandsb116fac2007-07-27 20:02:49 +00006171
Dan Gohman69de1932008-02-06 22:27:42 +00006172 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
Duncan Sandsb116fac2007-07-27 20:02:49 +00006173
Duncan Sands339e14f2008-01-16 22:55:25 +00006174 const X86InstrInfo *TII =
6175 ((X86TargetMachine&)getTargetMachine()).getInstrInfo();
6176
Duncan Sandsb116fac2007-07-27 20:02:49 +00006177 if (Subtarget->is64Bit()) {
Dan Gohman475871a2008-07-27 21:46:04 +00006178 SDValue OutChains[6];
Duncan Sands339e14f2008-01-16 22:55:25 +00006179
6180 // Large code-model.
6181
6182 const unsigned char JMP64r = TII->getBaseOpcodeFor(X86::JMP64r);
6183 const unsigned char MOV64ri = TII->getBaseOpcodeFor(X86::MOV64ri);
6184
Dan Gohmanc9f5f3f2008-05-14 01:58:56 +00006185 const unsigned char N86R10 = RegInfo->getX86RegNum(X86::R10);
6186 const unsigned char N86R11 = RegInfo->getX86RegNum(X86::R11);
Duncan Sands339e14f2008-01-16 22:55:25 +00006187
6188 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
6189
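    // For illustration, the stores below lay the 64-bit trampoline out as
    // follows (offsets are the constants used in the stores; 23 bytes total):
    //   +0   2-byte opcode: REX.WB + movabsq imm64, %r11
    //   +2   8-byte absolute address of the nested function (FPtr)
    //   +10  2-byte opcode: REX.WB + movabsq imm64, %r10
    //   +12  8-byte 'nest' parameter value (Nest)
    //   +20  2-byte opcode: REX.WB + jmpq
    //   +22  ModRM byte selecting *%r11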
6190 // Load the pointer to the nested function into R11.
6191 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
Dan Gohman475871a2008-07-27 21:46:04 +00006192 SDValue Addr = Trmp;
Dale Johannesene4d209d2009-02-03 20:21:25 +00006193 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
6194 Addr, TrmpAddr, 0);
Duncan Sands339e14f2008-01-16 22:55:25 +00006195
Scott Michelfdc40a02009-02-17 22:15:04 +00006196 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
Dale Johannesene4d209d2009-02-03 20:21:25 +00006197 DAG.getConstant(2, MVT::i64));
6198 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr, TrmpAddr, 2, false, 2);
Duncan Sands339e14f2008-01-16 22:55:25 +00006199
6200 // Load the 'nest' parameter value into R10.
6201 // R10 is specified in X86CallingConv.td
6202 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
Scott Michelfdc40a02009-02-17 22:15:04 +00006203 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
Dale Johannesene4d209d2009-02-03 20:21:25 +00006204 DAG.getConstant(10, MVT::i64));
6205 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
6206 Addr, TrmpAddr, 10);
Duncan Sands339e14f2008-01-16 22:55:25 +00006207
Scott Michelfdc40a02009-02-17 22:15:04 +00006208 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
Dale Johannesene4d209d2009-02-03 20:21:25 +00006209 DAG.getConstant(12, MVT::i64));
6210 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr, TrmpAddr, 12, false, 2);
Duncan Sands339e14f2008-01-16 22:55:25 +00006211
6212 // Jump to the nested function.
6213 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
Scott Michelfdc40a02009-02-17 22:15:04 +00006214 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
Dale Johannesene4d209d2009-02-03 20:21:25 +00006215 DAG.getConstant(20, MVT::i64));
6216 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
6217 Addr, TrmpAddr, 20);
Duncan Sands339e14f2008-01-16 22:55:25 +00006218
6219 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
Scott Michelfdc40a02009-02-17 22:15:04 +00006220 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
Dale Johannesene4d209d2009-02-03 20:21:25 +00006221 DAG.getConstant(22, MVT::i64));
6222 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
Dan Gohman69de1932008-02-06 22:27:42 +00006223 TrmpAddr, 22);
Duncan Sands339e14f2008-01-16 22:55:25 +00006224
Dan Gohman475871a2008-07-27 21:46:04 +00006225 SDValue Ops[] =
Dale Johannesene4d209d2009-02-03 20:21:25 +00006226 { Trmp, DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6) };
6227 return DAG.getMergeValues(Ops, 2, dl);
Duncan Sandsb116fac2007-07-27 20:02:49 +00006228 } else {
Dan Gohmanbbfb9c52008-01-31 01:01:48 +00006229 const Function *Func =
Duncan Sandsb116fac2007-07-27 20:02:49 +00006230 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
6231 unsigned CC = Func->getCallingConv();
Duncan Sandsee465742007-08-29 19:01:20 +00006232 unsigned NestReg;
Duncan Sandsb116fac2007-07-27 20:02:49 +00006233
6234 switch (CC) {
6235 default:
6236 assert(0 && "Unsupported calling convention");
6237 case CallingConv::C:
Duncan Sandsb116fac2007-07-27 20:02:49 +00006238 case CallingConv::X86_StdCall: {
6239 // Pass 'nest' parameter in ECX.
6240 // Must be kept in sync with X86CallingConv.td
Duncan Sandsee465742007-08-29 19:01:20 +00006241 NestReg = X86::ECX;
Duncan Sandsb116fac2007-07-27 20:02:49 +00006242
6243 // Check that ECX wasn't needed by an 'inreg' parameter.
6244 const FunctionType *FTy = Func->getFunctionType();
Devang Patel05988662008-09-25 21:00:45 +00006245 const AttrListPtr &Attrs = Func->getAttributes();
Duncan Sandsb116fac2007-07-27 20:02:49 +00006246
Chris Lattner58d74912008-03-12 17:45:29 +00006247 if (!Attrs.isEmpty() && !Func->isVarArg()) {
Duncan Sandsb116fac2007-07-27 20:02:49 +00006248 unsigned InRegCount = 0;
6249 unsigned Idx = 1;
6250
6251 for (FunctionType::param_iterator I = FTy->param_begin(),
6252 E = FTy->param_end(); I != E; ++I, ++Idx)
Devang Patel05988662008-09-25 21:00:45 +00006253 if (Attrs.paramHasAttr(Idx, Attribute::InReg))
Duncan Sandsb116fac2007-07-27 20:02:49 +00006254 // FIXME: should only count parameters that are lowered to integers.
Anton Korobeynikovbff66b02008-09-09 18:22:57 +00006255 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
Duncan Sandsb116fac2007-07-27 20:02:49 +00006256
6257 if (InRegCount > 2) {
6258 cerr << "Nest register in use - reduce number of inreg parameters!\n";
6259 abort();
6260 }
6261 }
6262 break;
6263 }
6264 case CallingConv::X86_FastCall:
Duncan Sandsbf53c292008-09-10 13:22:10 +00006265 case CallingConv::Fast:
Duncan Sandsb116fac2007-07-27 20:02:49 +00006266 // Pass 'nest' parameter in EAX.
6267 // Must be kept in sync with X86CallingConv.td
Duncan Sandsee465742007-08-29 19:01:20 +00006268 NestReg = X86::EAX;
Duncan Sandsb116fac2007-07-27 20:02:49 +00006269 break;
6270 }
6271
Dan Gohman475871a2008-07-27 21:46:04 +00006272 SDValue OutChains[4];
6273 SDValue Addr, Disp;
Duncan Sandsb116fac2007-07-27 20:02:49 +00006274
Scott Michelfdc40a02009-02-17 22:15:04 +00006275 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
Dale Johannesene4d209d2009-02-03 20:21:25 +00006276 DAG.getConstant(10, MVT::i32));
6277 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
Duncan Sandsb116fac2007-07-27 20:02:49 +00006278
Duncan Sands339e14f2008-01-16 22:55:25 +00006279 const unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
Dan Gohmanc9f5f3f2008-05-14 01:58:56 +00006280 const unsigned char N86Reg = RegInfo->getX86RegNum(NestReg);
Scott Michelfdc40a02009-02-17 22:15:04 +00006281 OutChains[0] = DAG.getStore(Root, dl,
Dale Johannesene4d209d2009-02-03 20:21:25 +00006282 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
Dan Gohman69de1932008-02-06 22:27:42 +00006283 Trmp, TrmpAddr, 0);
Duncan Sandsb116fac2007-07-27 20:02:49 +00006284
Scott Michelfdc40a02009-02-17 22:15:04 +00006285 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
Dale Johannesene4d209d2009-02-03 20:21:25 +00006286 DAG.getConstant(1, MVT::i32));
6287 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr, TrmpAddr, 1, false, 1);
Duncan Sandsb116fac2007-07-27 20:02:49 +00006288
Duncan Sands339e14f2008-01-16 22:55:25 +00006289 const unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP);
Scott Michelfdc40a02009-02-17 22:15:04 +00006290 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
Dale Johannesene4d209d2009-02-03 20:21:25 +00006291 DAG.getConstant(5, MVT::i32));
6292 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
Dan Gohman69de1932008-02-06 22:27:42 +00006293 TrmpAddr, 5, false, 1);
Duncan Sandsb116fac2007-07-27 20:02:49 +00006294
Scott Michelfdc40a02009-02-17 22:15:04 +00006295 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
Dale Johannesene4d209d2009-02-03 20:21:25 +00006296 DAG.getConstant(6, MVT::i32));
6297 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr, TrmpAddr, 6, false, 1);
Duncan Sandsb116fac2007-07-27 20:02:49 +00006298
Dan Gohman475871a2008-07-27 21:46:04 +00006299 SDValue Ops[] =
Dale Johannesene4d209d2009-02-03 20:21:25 +00006300 { Trmp, DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 4) };
6301 return DAG.getMergeValues(Ops, 2, dl);
Duncan Sandsb116fac2007-07-27 20:02:49 +00006302 }
6303}
6304
Dan Gohman475871a2008-07-27 21:46:04 +00006305SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) {
Anton Korobeynikov45b22fa2007-11-16 01:31:51 +00006306 /*
6307 The rounding mode is in bits 11:10 of the FPCW (x87 FP control word), and has the following
6308 settings:
6309 00 Round to nearest
6310 01 Round to -inf
6311 10 Round to +inf
6312 11 Round to 0
6313
6314 FLT_ROUNDS, on the other hand, expects the following:
6315 -1 Undefined
6316 0 Round to 0
6317 1 Round to nearest
6318 2 Round to +inf
6319 3 Round to -inf
6320
6321 To perform the conversion, we do:
6322 (((((FPCW & 0x800) >> 11) | ((FPCW & 0x400) >> 9)) + 1) & 3)
6323 */
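  // For illustration, the expression above maps the hardware encoding to the
  // FLT_ROUNDS values as follows:
  //   FPCW[11:10] = 00 (nearest) -> ((0 | 0) + 1) & 3 = 1
  //   FPCW[11:10] = 01 (-inf)    -> ((0 | 2) + 1) & 3 = 3
  //   FPCW[11:10] = 10 (+inf)    -> ((1 | 0) + 1) & 3 = 2
  //   FPCW[11:10] = 11 (to zero) -> ((1 | 2) + 1) & 3 = 0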
6324
6325 MachineFunction &MF = DAG.getMachineFunction();
6326 const TargetMachine &TM = MF.getTarget();
6327 const TargetFrameInfo &TFI = *TM.getFrameInfo();
6328 unsigned StackAlignment = TFI.getStackAlignment();
Duncan Sands83ec4b62008-06-06 12:08:01 +00006329 MVT VT = Op.getValueType();
Dale Johannesen6f38cb62009-02-07 19:59:05 +00006330 DebugLoc dl = Op.getDebugLoc();
Anton Korobeynikov45b22fa2007-11-16 01:31:51 +00006331
6332 // Save FP Control Word to stack slot
6333 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment);
Dan Gohman475871a2008-07-27 21:46:04 +00006334 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
Anton Korobeynikov45b22fa2007-11-16 01:31:51 +00006335
Dale Johannesene4d209d2009-02-03 20:21:25 +00006336 SDValue Chain = DAG.getNode(X86ISD::FNSTCW16m, dl, MVT::Other,
Evan Cheng8a186ae2008-09-24 23:26:36 +00006337 DAG.getEntryNode(), StackSlot);
Anton Korobeynikov45b22fa2007-11-16 01:31:51 +00006338
6339 // Load FP Control Word from stack slot
Dale Johannesene4d209d2009-02-03 20:21:25 +00006340 SDValue CWD = DAG.getLoad(MVT::i16, dl, Chain, StackSlot, NULL, 0);
Anton Korobeynikov45b22fa2007-11-16 01:31:51 +00006341
6342 // Transform as necessary
Dan Gohman475871a2008-07-27 21:46:04 +00006343 SDValue CWD1 =
Dale Johannesene4d209d2009-02-03 20:21:25 +00006344 DAG.getNode(ISD::SRL, dl, MVT::i16,
6345 DAG.getNode(ISD::AND, dl, MVT::i16,
Anton Korobeynikov45b22fa2007-11-16 01:31:51 +00006346 CWD, DAG.getConstant(0x800, MVT::i16)),
6347 DAG.getConstant(11, MVT::i8));
Dan Gohman475871a2008-07-27 21:46:04 +00006348 SDValue CWD2 =
Dale Johannesene4d209d2009-02-03 20:21:25 +00006349 DAG.getNode(ISD::SRL, dl, MVT::i16,
6350 DAG.getNode(ISD::AND, dl, MVT::i16,
Anton Korobeynikov45b22fa2007-11-16 01:31:51 +00006351 CWD, DAG.getConstant(0x400, MVT::i16)),
6352 DAG.getConstant(9, MVT::i8));
6353
Dan Gohman475871a2008-07-27 21:46:04 +00006354 SDValue RetVal =
Dale Johannesene4d209d2009-02-03 20:21:25 +00006355 DAG.getNode(ISD::AND, dl, MVT::i16,
6356 DAG.getNode(ISD::ADD, dl, MVT::i16,
6357 DAG.getNode(ISD::OR, dl, MVT::i16, CWD1, CWD2),
Anton Korobeynikov45b22fa2007-11-16 01:31:51 +00006358 DAG.getConstant(1, MVT::i16)),
6359 DAG.getConstant(3, MVT::i16));
6360
6361
Duncan Sands83ec4b62008-06-06 12:08:01 +00006362 return DAG.getNode((VT.getSizeInBits() < 16 ?
Dale Johannesenb300d2a2009-02-07 00:55:49 +00006363 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
Anton Korobeynikov45b22fa2007-11-16 01:31:51 +00006364}
6365
Dan Gohman475871a2008-07-27 21:46:04 +00006366SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
Duncan Sands83ec4b62008-06-06 12:08:01 +00006367 MVT VT = Op.getValueType();
6368 MVT OpVT = VT;
6369 unsigned NumBits = VT.getSizeInBits();
Dale Johannesen6f38cb62009-02-07 19:59:05 +00006370 DebugLoc dl = Op.getDebugLoc();
Evan Cheng18efe262007-12-14 02:13:44 +00006371
6372 Op = Op.getOperand(0);
6373 if (VT == MVT::i8) {
Evan Cheng152804e2007-12-14 08:30:15 +00006374 // Zero-extend to i32 since there is no i8 bsr.
Evan Cheng18efe262007-12-14 02:13:44 +00006375 OpVT = MVT::i32;
Dale Johannesene4d209d2009-02-03 20:21:25 +00006376 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
Evan Cheng18efe262007-12-14 02:13:44 +00006377 }
Evan Cheng18efe262007-12-14 02:13:44 +00006378
Evan Cheng152804e2007-12-14 08:30:15 +00006379 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
6380 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
Dale Johannesene4d209d2009-02-03 20:21:25 +00006381 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
Evan Cheng152804e2007-12-14 08:30:15 +00006382
6383 // If src is zero (i.e. bsr sets ZF), returns NumBits.
Dan Gohman475871a2008-07-27 21:46:04 +00006384 SmallVector<SDValue, 4> Ops;
Evan Cheng152804e2007-12-14 08:30:15 +00006385 Ops.push_back(Op);
6386 Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT));
6387 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8));
6388 Ops.push_back(Op.getValue(1));
Dale Johannesene4d209d2009-02-03 20:21:25 +00006389 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, &Ops[0], 4);
Evan Cheng152804e2007-12-14 08:30:15 +00006390
6391 // Finally xor with NumBits-1.
Dale Johannesene4d209d2009-02-03 20:21:25 +00006392 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
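  // Worked example (i32): for Op == 0x00000010, BSR yields 4, and
  // 31 ^ 4 == 27, which is indeed ctlz(0x00000010).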
Evan Cheng152804e2007-12-14 08:30:15 +00006393
Evan Cheng18efe262007-12-14 02:13:44 +00006394 if (VT == MVT::i8)
Dale Johannesene4d209d2009-02-03 20:21:25 +00006395 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
Evan Cheng18efe262007-12-14 02:13:44 +00006396 return Op;
6397}
6398
Dan Gohman475871a2008-07-27 21:46:04 +00006399SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
Duncan Sands83ec4b62008-06-06 12:08:01 +00006400 MVT VT = Op.getValueType();
6401 MVT OpVT = VT;
6402 unsigned NumBits = VT.getSizeInBits();
Dale Johannesen6f38cb62009-02-07 19:59:05 +00006403 DebugLoc dl = Op.getDebugLoc();
Evan Cheng18efe262007-12-14 02:13:44 +00006404
6405 Op = Op.getOperand(0);
6406 if (VT == MVT::i8) {
6407 OpVT = MVT::i32;
Dale Johannesene4d209d2009-02-03 20:21:25 +00006408 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
Evan Cheng18efe262007-12-14 02:13:44 +00006409 }
Evan Cheng152804e2007-12-14 08:30:15 +00006410
6411 // Issue a bsf (scan bits forward) which also sets EFLAGS.
6412 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
Dale Johannesene4d209d2009-02-03 20:21:25 +00006413 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
Evan Cheng152804e2007-12-14 08:30:15 +00006414
6415 // If src is zero (i.e. bsf sets ZF), returns NumBits.
Dan Gohman475871a2008-07-27 21:46:04 +00006416 SmallVector<SDValue, 4> Ops;
Evan Cheng152804e2007-12-14 08:30:15 +00006417 Ops.push_back(Op);
6418 Ops.push_back(DAG.getConstant(NumBits, OpVT));
6419 Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8));
6420 Ops.push_back(Op.getValue(1));
Dale Johannesene4d209d2009-02-03 20:21:25 +00006421 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, &Ops[0], 4);
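  // Note: BSF already computes cttz for any non-zero input; the CMOV above
  // only patches the all-zero case to return NumBits.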
Evan Cheng152804e2007-12-14 08:30:15 +00006422
Evan Cheng18efe262007-12-14 02:13:44 +00006423 if (VT == MVT::i8)
Dale Johannesene4d209d2009-02-03 20:21:25 +00006424 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
Evan Cheng18efe262007-12-14 02:13:44 +00006425 return Op;
6426}
6427
Mon P Wangaf9b9522008-12-18 21:42:19 +00006428SDValue X86TargetLowering::LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) {
6429 MVT VT = Op.getValueType();
6430 assert(VT == MVT::v2i64 && "Only know how to lower V2I64 multiply");
Dale Johannesen6f38cb62009-02-07 19:59:05 +00006431 DebugLoc dl = Op.getDebugLoc();
Scott Michelfdc40a02009-02-17 22:15:04 +00006432
Mon P Wangaf9b9522008-12-18 21:42:19 +00006433 // ulong2 Ahi = __builtin_ia32_psrlqi128( a, 32);
6434 // ulong2 Bhi = __builtin_ia32_psrlqi128( b, 32);
6435 // ulong2 AloBlo = __builtin_ia32_pmuludq128( a, b );
6436 // ulong2 AloBhi = __builtin_ia32_pmuludq128( a, Bhi );
6437 // ulong2 AhiBlo = __builtin_ia32_pmuludq128( Ahi, b );
6438 //
6439 // AloBhi = __builtin_ia32_psllqi128( AloBhi, 32 );
6440 // AhiBlo = __builtin_ia32_psllqi128( AhiBlo, 32 );
6441 // return AloBlo + AloBhi + AhiBlo;
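  // Sketch of why this works (per 64-bit lane, arithmetic mod 2^64):
  //   A * B = (Alo + 2^32*Ahi) * (Blo + 2^32*Bhi)
  //         = Alo*Blo + 2^32*(Alo*Bhi + Ahi*Blo)   // the 2^64*Ahi*Bhi term wraps to 0
  // which is exactly AloBlo + (AloBhi << 32) + (AhiBlo << 32), built below from
  // PMULUDQ (32x32->64 unsigned multiply) plus the 32-bit shifts.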
6442
6443 SDValue A = Op.getOperand(0);
6444 SDValue B = Op.getOperand(1);
Scott Michelfdc40a02009-02-17 22:15:04 +00006445
Dale Johannesene4d209d2009-02-03 20:21:25 +00006446 SDValue Ahi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
Mon P Wangaf9b9522008-12-18 21:42:19 +00006447 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
6448 A, DAG.getConstant(32, MVT::i32));
Dale Johannesene4d209d2009-02-03 20:21:25 +00006449 SDValue Bhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
Mon P Wangaf9b9522008-12-18 21:42:19 +00006450 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
6451 B, DAG.getConstant(32, MVT::i32));
Dale Johannesene4d209d2009-02-03 20:21:25 +00006452 SDValue AloBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
Mon P Wangaf9b9522008-12-18 21:42:19 +00006453 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32),
6454 A, B);
Dale Johannesene4d209d2009-02-03 20:21:25 +00006455 SDValue AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
Mon P Wangaf9b9522008-12-18 21:42:19 +00006456 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32),
6457 A, Bhi);
Dale Johannesene4d209d2009-02-03 20:21:25 +00006458 SDValue AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
Mon P Wangaf9b9522008-12-18 21:42:19 +00006459 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32),
6460 Ahi, B);
Dale Johannesene4d209d2009-02-03 20:21:25 +00006461 AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
Mon P Wangaf9b9522008-12-18 21:42:19 +00006462 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
6463 AloBhi, DAG.getConstant(32, MVT::i32));
Dale Johannesene4d209d2009-02-03 20:21:25 +00006464 AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
Mon P Wangaf9b9522008-12-18 21:42:19 +00006465 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
6466 AhiBlo, DAG.getConstant(32, MVT::i32));
Dale Johannesene4d209d2009-02-03 20:21:25 +00006467 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
6468 Res = DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
Mon P Wangaf9b9522008-12-18 21:42:19 +00006469 return Res;
6470}
6471
6472
Bill Wendling74c37652008-12-09 22:08:41 +00006473SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) {
6474 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
6475 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
Bill Wendling61edeb52008-12-02 01:06:39 +00006476 // looks for this combo and may remove the "setcc" instruction if the "setcc"
6477 // has only one use.
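  //
  // For example (descriptive only), an i32 llvm.sadd.with.overflow becomes
  //   Sum = X86ISD::ADD LHS, RHS           // also sets EFLAGS
  //   Ovf = X86ISD::SETCC COND_O, Sum.1
  // and a branch on Ovf can later be folded into a single JO.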
Bill Wendling3fafd932008-11-26 22:37:40 +00006478 SDNode *N = Op.getNode();
Bill Wendling61edeb52008-12-02 01:06:39 +00006479 SDValue LHS = N->getOperand(0);
6480 SDValue RHS = N->getOperand(1);
Bill Wendling74c37652008-12-09 22:08:41 +00006481 unsigned BaseOp = 0;
6482 unsigned Cond = 0;
Dale Johannesen6f38cb62009-02-07 19:59:05 +00006483 DebugLoc dl = Op.getDebugLoc();
Bill Wendling74c37652008-12-09 22:08:41 +00006484
6485 switch (Op.getOpcode()) {
6486 default: assert(0 && "Unknown ovf instruction!");
6487 case ISD::SADDO:
Dan Gohman076aee32009-03-04 19:44:21 +00006488 // An add of one will be selected as an INC. Note that INC doesn't
6489 // set CF, so we can't do this for UADDO.
6490 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
6491 if (C->getAPIntValue() == 1) {
6492 BaseOp = X86ISD::INC;
6493 Cond = X86::COND_O;
6494 break;
6495 }
Bill Wendlingab55ebd2008-12-12 00:56:36 +00006496 BaseOp = X86ISD::ADD;
Bill Wendling74c37652008-12-09 22:08:41 +00006497 Cond = X86::COND_O;
6498 break;
6499 case ISD::UADDO:
Bill Wendlingab55ebd2008-12-12 00:56:36 +00006500 BaseOp = X86ISD::ADD;
Dan Gohman653456c2009-01-07 00:15:08 +00006501 Cond = X86::COND_B;
Bill Wendling74c37652008-12-09 22:08:41 +00006502 break;
6503 case ISD::SSUBO:
Dan Gohman076aee32009-03-04 19:44:21 +00006504 // A subtract of one will be selected as a DEC. Note that DEC doesn't
6505 // set CF, so we can't do this for USUBO.
6506 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
6507 if (C->getAPIntValue() == 1) {
6508 BaseOp = X86ISD::DEC;
6509 Cond = X86::COND_O;
6510 break;
6511 }
Bill Wendlingab55ebd2008-12-12 00:56:36 +00006512 BaseOp = X86ISD::SUB;
Bill Wendling74c37652008-12-09 22:08:41 +00006513 Cond = X86::COND_O;
6514 break;
6515 case ISD::USUBO:
Bill Wendlingab55ebd2008-12-12 00:56:36 +00006516 BaseOp = X86ISD::SUB;
Dan Gohman653456c2009-01-07 00:15:08 +00006517 Cond = X86::COND_B;
Bill Wendling74c37652008-12-09 22:08:41 +00006518 break;
6519 case ISD::SMULO:
Bill Wendlingd350e022008-12-12 21:15:41 +00006520 BaseOp = X86ISD::SMUL;
Bill Wendling74c37652008-12-09 22:08:41 +00006521 Cond = X86::COND_O;
6522 break;
6523 case ISD::UMULO:
Bill Wendlingd350e022008-12-12 21:15:41 +00006524 BaseOp = X86ISD::UMUL;
Dan Gohman653456c2009-01-07 00:15:08 +00006525 Cond = X86::COND_B;
Bill Wendling74c37652008-12-09 22:08:41 +00006526 break;
6527 }
Bill Wendling3fafd932008-11-26 22:37:40 +00006528
Bill Wendling61edeb52008-12-02 01:06:39 +00006529 // Also sets EFLAGS.
6530 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
Dale Johannesene4d209d2009-02-03 20:21:25 +00006531 SDValue Sum = DAG.getNode(BaseOp, dl, VTs, LHS, RHS);
Bill Wendling3fafd932008-11-26 22:37:40 +00006532
Bill Wendling61edeb52008-12-02 01:06:39 +00006533 SDValue SetCC =
Dale Johannesene4d209d2009-02-03 20:21:25 +00006534 DAG.getNode(X86ISD::SETCC, dl, N->getValueType(1),
Bill Wendlingbc5e15e2008-12-10 02:01:32 +00006535 DAG.getConstant(Cond, MVT::i32), SDValue(Sum.getNode(), 1));
Bill Wendling3fafd932008-11-26 22:37:40 +00006536
Bill Wendling61edeb52008-12-02 01:06:39 +00006537 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SetCC);
6538 return Sum;
Bill Wendling41ea7e72008-11-24 19:21:46 +00006539}
6540
Dan Gohman475871a2008-07-27 21:46:04 +00006541SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) {
Dan Gohmanfd4418f2008-06-25 16:07:49 +00006542 MVT T = Op.getValueType();
Dale Johannesen6f38cb62009-02-07 19:59:05 +00006543 DebugLoc dl = Op.getDebugLoc();
Andrew Lenhartha76e2f02008-03-04 21:13:33 +00006544 unsigned Reg = 0;
6545 unsigned size = 0;
Duncan Sands83ec4b62008-06-06 12:08:01 +00006546 switch(T.getSimpleVT()) {
6547 default:
6548 assert(false && "Invalid value type!");
Andrew Lenharth26ed8692008-03-01 21:52:34 +00006549 case MVT::i8: Reg = X86::AL; size = 1; break;
6550 case MVT::i16: Reg = X86::AX; size = 2; break;
6551 case MVT::i32: Reg = X86::EAX; size = 4; break;
Scott Michelfdc40a02009-02-17 22:15:04 +00006552 case MVT::i64:
Duncan Sands1607f052008-12-01 11:39:25 +00006553 assert(Subtarget->is64Bit() && "Node not type legal!");
6554 Reg = X86::RAX; size = 8;
Andrew Lenharthd19189e2008-03-05 01:15:49 +00006555 break;
Bill Wendling61edeb52008-12-02 01:06:39 +00006556 }
Dale Johannesendd64c412009-02-04 00:33:20 +00006557 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), dl, Reg,
Dale Johannesend18a4622008-09-11 03:12:59 +00006558 Op.getOperand(2), SDValue());
Dan Gohman475871a2008-07-27 21:46:04 +00006559 SDValue Ops[] = { cpIn.getValue(0),
Evan Cheng8a186ae2008-09-24 23:26:36 +00006560 Op.getOperand(1),
6561 Op.getOperand(3),
6562 DAG.getTargetConstant(size, MVT::i8),
6563 cpIn.getValue(1) };
Andrew Lenharth26ed8692008-03-01 21:52:34 +00006564 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
Dale Johannesene4d209d2009-02-03 20:21:25 +00006565 SDValue Result = DAG.getNode(X86ISD::LCMPXCHG_DAG, dl, Tys, Ops, 5);
Scott Michelfdc40a02009-02-17 22:15:04 +00006566 SDValue cpOut =
Dale Johannesendd64c412009-02-04 00:33:20 +00006567 DAG.getCopyFromReg(Result.getValue(0), dl, Reg, T, Result.getValue(1));
Andrew Lenharth26ed8692008-03-01 21:52:34 +00006568 return cpOut;
6569}
6570
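/// LowerREADCYCLECOUNTER - On x86-64, RDTSC leaves the timestamp in RDX:RAX
/// (high half in RDX); the code below shifts RDX left by 32 and ORs it with
/// RAX to produce the single i64 result.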
Duncan Sands1607f052008-12-01 11:39:25 +00006571SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
Gabor Greif327ef032008-08-28 23:19:51 +00006572 SelectionDAG &DAG) {
Duncan Sands1607f052008-12-01 11:39:25 +00006573 assert(Subtarget->is64Bit() && "Result not type legalized?");
Andrew Lenharthd19189e2008-03-05 01:15:49 +00006574 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
Duncan Sands1607f052008-12-01 11:39:25 +00006575 SDValue TheChain = Op.getOperand(0);
Dale Johannesen6f38cb62009-02-07 19:59:05 +00006576 DebugLoc dl = Op.getDebugLoc();
Dale Johannesene4d209d2009-02-03 20:21:25 +00006577 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1);
Dale Johannesendd64c412009-02-04 00:33:20 +00006578 SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1));
6579 SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64,
Duncan Sands1607f052008-12-01 11:39:25 +00006580 rax.getValue(2));
Dale Johannesene4d209d2009-02-03 20:21:25 +00006581 SDValue Tmp = DAG.getNode(ISD::SHL, dl, MVT::i64, rdx,
Duncan Sands1607f052008-12-01 11:39:25 +00006582 DAG.getConstant(32, MVT::i8));
6583 SDValue Ops[] = {
Dale Johannesene4d209d2009-02-03 20:21:25 +00006584 DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp),
Duncan Sands1607f052008-12-01 11:39:25 +00006585 rdx.getValue(1)
6586 };
Dale Johannesene4d209d2009-02-03 20:21:25 +00006587 return DAG.getMergeValues(Ops, 2, dl);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00006588}
6589
Dale Johannesen71d1bf52008-09-29 22:25:26 +00006590SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
6591 SDNode *Node = Op.getNode();
Dale Johannesene4d209d2009-02-03 20:21:25 +00006592 DebugLoc dl = Node->getDebugLoc();
Dale Johannesen71d1bf52008-09-29 22:25:26 +00006593 MVT T = Node->getValueType(0);
Dale Johannesene4d209d2009-02-03 20:21:25 +00006594 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
Evan Cheng242b38b2009-02-23 09:03:22 +00006595 DAG.getConstant(0, T), Node->getOperand(2));
Dale Johannesene4d209d2009-02-03 20:21:25 +00006596 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
Dan Gohman0b1d4a72008-12-23 21:37:04 +00006597 cast<AtomicSDNode>(Node)->getMemoryVT(),
Dale Johannesen71d1bf52008-09-29 22:25:26 +00006598 Node->getOperand(0),
6599 Node->getOperand(1), negOp,
6600 cast<AtomicSDNode>(Node)->getSrcValue(),
6601 cast<AtomicSDNode>(Node)->getAlignment());
Mon P Wang63307c32008-05-05 19:05:59 +00006602}
6603
Evan Cheng0db9fe62006-04-25 20:13:52 +00006604/// LowerOperation - Provide custom lowering hooks for some operations.
6605///
Dan Gohman475871a2008-07-27 21:46:04 +00006606SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
Evan Cheng0db9fe62006-04-25 20:13:52 +00006607 switch (Op.getOpcode()) {
6608 default: assert(0 && "Should not custom lower this!");
Dan Gohman0b1d4a72008-12-23 21:37:04 +00006609 case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG);
6610 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
Evan Cheng0db9fe62006-04-25 20:13:52 +00006611 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
6612 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
6613 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
6614 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
6615 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
6616 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
6617 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
Lauro Ramos Venanciob3a04172007-04-20 21:38:10 +00006618 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
Bill Wendling056292f2008-09-16 21:48:12 +00006619 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
Evan Cheng0db9fe62006-04-25 20:13:52 +00006620 case ISD::SHL_PARTS:
6621 case ISD::SRA_PARTS:
6622 case ISD::SRL_PARTS: return LowerShift(Op, DAG);
6623 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
Dale Johannesen1c15bf52008-10-21 20:50:01 +00006624 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
Evan Cheng0db9fe62006-04-25 20:13:52 +00006625 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
Eli Friedman948e95a2009-05-23 09:59:16 +00006626 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
Evan Cheng0db9fe62006-04-25 20:13:52 +00006627 case ISD::FABS: return LowerFABS(Op, DAG);
6628 case ISD::FNEG: return LowerFNEG(Op, DAG);
Evan Cheng68c47cb2007-01-05 07:55:56 +00006629 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
Evan Chenge5f62042007-09-29 00:00:36 +00006630 case ISD::SETCC: return LowerSETCC(Op, DAG);
Nate Begeman30a0de92008-07-17 16:51:19 +00006631 case ISD::VSETCC: return LowerVSETCC(Op, DAG);
Evan Chenge5f62042007-09-29 00:00:36 +00006632 case ISD::SELECT: return LowerSELECT(Op, DAG);
6633 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
Evan Cheng0db9fe62006-04-25 20:13:52 +00006634 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
Evan Cheng32fe1032006-05-25 00:59:30 +00006635 case ISD::CALL: return LowerCALL(Op, DAG);
Evan Cheng0db9fe62006-04-25 20:13:52 +00006636 case ISD::RET: return LowerRET(Op, DAG);
Evan Cheng1bc78042006-04-26 01:20:17 +00006637 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
Evan Cheng0db9fe62006-04-25 20:13:52 +00006638 case ISD::VASTART: return LowerVASTART(Op, DAG);
Dan Gohman9018e832008-05-10 01:26:14 +00006639 case ISD::VAARG: return LowerVAARG(Op, DAG);
Evan Chengae642192007-03-02 23:16:35 +00006640 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
Evan Cheng0db9fe62006-04-25 20:13:52 +00006641 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
Nate Begemanbcc5f362007-01-29 22:58:52 +00006642 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
6643 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
Anton Korobeynikov2365f512007-07-14 14:06:15 +00006644 case ISD::FRAME_TO_ARGS_OFFSET:
6645 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
Anton Korobeynikov57fc00d2007-04-17 09:20:00 +00006646 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
Anton Korobeynikov2365f512007-07-14 14:06:15 +00006647 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
Duncan Sandsb116fac2007-07-27 20:02:49 +00006648 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG);
Dan Gohman1a024862008-01-31 00:41:03 +00006649 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
Evan Cheng18efe262007-12-14 02:13:44 +00006650 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
6651 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
Mon P Wangaf9b9522008-12-18 21:42:19 +00006652 case ISD::MUL: return LowerMUL_V2I64(Op, DAG);
Bill Wendling74c37652008-12-09 22:08:41 +00006653 case ISD::SADDO:
6654 case ISD::UADDO:
6655 case ISD::SSUBO:
6656 case ISD::USUBO:
6657 case ISD::SMULO:
6658 case ISD::UMULO: return LowerXALUO(Op, DAG);
Duncan Sands1607f052008-12-01 11:39:25 +00006659 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG);
Evan Cheng0db9fe62006-04-25 20:13:52 +00006660 }
Chris Lattner27a6c732007-11-24 07:07:01 +00006661}
6662
Duncan Sands1607f052008-12-01 11:39:25 +00006663void X86TargetLowering::
6664ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results,
6665 SelectionDAG &DAG, unsigned NewOp) {
6666 MVT T = Node->getValueType(0);
Dale Johannesene4d209d2009-02-03 20:21:25 +00006667 DebugLoc dl = Node->getDebugLoc();
Duncan Sands1607f052008-12-01 11:39:25 +00006668 assert (T == MVT::i64 && "Only know how to expand i64 atomics");
6669
6670 SDValue Chain = Node->getOperand(0);
6671 SDValue In1 = Node->getOperand(1);
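  // i64 is not a legal type on this target, so split the value operand into
  // its low and high 32-bit halves before building the custom node.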
Dale Johannesene4d209d2009-02-03 20:21:25 +00006672 SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
Duncan Sands1607f052008-12-01 11:39:25 +00006673 Node->getOperand(2), DAG.getIntPtrConstant(0));
Dale Johannesene4d209d2009-02-03 20:21:25 +00006674 SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
Duncan Sands1607f052008-12-01 11:39:25 +00006675 Node->getOperand(2), DAG.getIntPtrConstant(1));
6676 // This is a generalized SDNode, not an AtomicSDNode, so it doesn't
6677 // have a MemOperand. Pass the info through as a normal operand.
6678 SDValue LSI = DAG.getMemOperand(cast<MemSDNode>(Node)->getMemOperand());
6679 SDValue Ops[] = { Chain, In1, In2L, In2H, LSI };
6680 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
Dale Johannesene4d209d2009-02-03 20:21:25 +00006681 SDValue Result = DAG.getNode(NewOp, dl, Tys, Ops, 5);
Duncan Sands1607f052008-12-01 11:39:25 +00006682 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)};
Dale Johannesene4d209d2009-02-03 20:21:25 +00006683 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
Duncan Sands1607f052008-12-01 11:39:25 +00006684 Results.push_back(Result.getValue(2));
6685}
6686
Duncan Sands126d9072008-07-04 11:47:58 +00006687/// ReplaceNodeResults - Replace a node with an illegal result type
6688/// with a new node built out of custom code.
Duncan Sands1607f052008-12-01 11:39:25 +00006689void X86TargetLowering::ReplaceNodeResults(SDNode *N,
6690 SmallVectorImpl<SDValue>&Results,
6691 SelectionDAG &DAG) {
Dale Johannesene4d209d2009-02-03 20:21:25 +00006692 DebugLoc dl = N->getDebugLoc();
Chris Lattner27a6c732007-11-24 07:07:01 +00006693 switch (N->getOpcode()) {
Duncan Sandsed294c42008-10-20 15:56:33 +00006694 default:
Duncan Sands1607f052008-12-01 11:39:25 +00006695 assert(false && "Do not know how to custom type legalize this operation!");
6696 return;
6697 case ISD::FP_TO_SINT: {
Eli Friedman948e95a2009-05-23 09:59:16 +00006698 std::pair<SDValue,SDValue> Vals =
6699 FP_TO_INTHelper(SDValue(N, 0), DAG, true);
Duncan Sands1607f052008-12-01 11:39:25 +00006700 SDValue FIST = Vals.first, StackSlot = Vals.second;
6701 if (FIST.getNode() != 0) {
6702 MVT VT = N->getValueType(0);
6703 // Return a load from the stack slot.
Dale Johannesene4d209d2009-02-03 20:21:25 +00006704 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot, NULL, 0));
Duncan Sands1607f052008-12-01 11:39:25 +00006705 }
6706 return;
6707 }
6708 case ISD::READCYCLECOUNTER: {
6709 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
6710 SDValue TheChain = N->getOperand(0);
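    // On 32-bit targets RDTSC leaves the counter in EDX:EAX; read both halves
    // and pair them into an i64 below.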
Dale Johannesene4d209d2009-02-03 20:21:25 +00006711 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1);
Scott Michelfdc40a02009-02-17 22:15:04 +00006712 SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32,
Dale Johannesendd64c412009-02-04 00:33:20 +00006713 rd.getValue(1));
6714 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32,
Duncan Sands1607f052008-12-01 11:39:25 +00006715 eax.getValue(2));
6716 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
6717 SDValue Ops[] = { eax, edx };
Dale Johannesene4d209d2009-02-03 20:21:25 +00006718 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, 2));
Duncan Sands1607f052008-12-01 11:39:25 +00006719 Results.push_back(edx.getValue(1));
6720 return;
6721 }
Dan Gohman0b1d4a72008-12-23 21:37:04 +00006722 case ISD::ATOMIC_CMP_SWAP: {
Duncan Sands1607f052008-12-01 11:39:25 +00006723 MVT T = N->getValueType(0);
6724 assert (T == MVT::i64 && "Only know how to expand i64 Cmp and Swap");
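    // CMPXCHG8B compares EDX:EAX against the memory operand and, on success,
    // stores ECX:EBX; the old value always comes back in EDX:EAX.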
6725 SDValue cpInL, cpInH;
Dale Johannesene4d209d2009-02-03 20:21:25 +00006726 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(2),
Duncan Sands1607f052008-12-01 11:39:25 +00006727 DAG.getConstant(0, MVT::i32));
Dale Johannesene4d209d2009-02-03 20:21:25 +00006728 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(2),
Duncan Sands1607f052008-12-01 11:39:25 +00006729 DAG.getConstant(1, MVT::i32));
Dale Johannesendd64c412009-02-04 00:33:20 +00006730 cpInL = DAG.getCopyToReg(N->getOperand(0), dl, X86::EAX, cpInL, SDValue());
6731 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl, X86::EDX, cpInH,
Duncan Sands1607f052008-12-01 11:39:25 +00006732 cpInL.getValue(1));
6733 SDValue swapInL, swapInH;
Dale Johannesene4d209d2009-02-03 20:21:25 +00006734 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(3),
Duncan Sands1607f052008-12-01 11:39:25 +00006735 DAG.getConstant(0, MVT::i32));
Dale Johannesene4d209d2009-02-03 20:21:25 +00006736 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(3),
Duncan Sands1607f052008-12-01 11:39:25 +00006737 DAG.getConstant(1, MVT::i32));
Dale Johannesendd64c412009-02-04 00:33:20 +00006738 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl, X86::EBX, swapInL,
Duncan Sands1607f052008-12-01 11:39:25 +00006739 cpInH.getValue(1));
Dale Johannesendd64c412009-02-04 00:33:20 +00006740 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl, X86::ECX, swapInH,
Duncan Sands1607f052008-12-01 11:39:25 +00006741 swapInL.getValue(1));
6742 SDValue Ops[] = { swapInH.getValue(0),
6743 N->getOperand(1),
6744 swapInH.getValue(1) };
6745 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
Dale Johannesene4d209d2009-02-03 20:21:25 +00006746 SDValue Result = DAG.getNode(X86ISD::LCMPXCHG8_DAG, dl, Tys, Ops, 3);
Dale Johannesendd64c412009-02-04 00:33:20 +00006747 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, X86::EAX,
6748 MVT::i32, Result.getValue(1));
6749 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl, X86::EDX,
6750 MVT::i32, cpOutL.getValue(2));
Duncan Sands1607f052008-12-01 11:39:25 +00006751 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
Dale Johannesene4d209d2009-02-03 20:21:25 +00006752 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
Duncan Sands1607f052008-12-01 11:39:25 +00006753 Results.push_back(cpOutH.getValue(1));
6754 return;
6755 }
Dan Gohman0b1d4a72008-12-23 21:37:04 +00006756 case ISD::ATOMIC_LOAD_ADD:
Duncan Sands1607f052008-12-01 11:39:25 +00006757 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMADD64_DAG);
6758 return;
Dan Gohman0b1d4a72008-12-23 21:37:04 +00006759 case ISD::ATOMIC_LOAD_AND:
Duncan Sands1607f052008-12-01 11:39:25 +00006760 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMAND64_DAG);
6761 return;
Dan Gohman0b1d4a72008-12-23 21:37:04 +00006762 case ISD::ATOMIC_LOAD_NAND:
Duncan Sands1607f052008-12-01 11:39:25 +00006763 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMNAND64_DAG);
6764 return;
Dan Gohman0b1d4a72008-12-23 21:37:04 +00006765 case ISD::ATOMIC_LOAD_OR:
Duncan Sands1607f052008-12-01 11:39:25 +00006766 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMOR64_DAG);
6767 return;
Dan Gohman0b1d4a72008-12-23 21:37:04 +00006768 case ISD::ATOMIC_LOAD_SUB:
Duncan Sands1607f052008-12-01 11:39:25 +00006769 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSUB64_DAG);
6770 return;
Dan Gohman0b1d4a72008-12-23 21:37:04 +00006771 case ISD::ATOMIC_LOAD_XOR:
Duncan Sands1607f052008-12-01 11:39:25 +00006772 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMXOR64_DAG);
6773 return;
Dan Gohman0b1d4a72008-12-23 21:37:04 +00006774 case ISD::ATOMIC_SWAP:
Duncan Sands1607f052008-12-01 11:39:25 +00006775 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSWAP64_DAG);
6776 return;
Chris Lattner27a6c732007-11-24 07:07:01 +00006777 }
Evan Cheng0db9fe62006-04-25 20:13:52 +00006778}
6779
Evan Cheng72261582005-12-20 06:22:03 +00006780const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
6781 switch (Opcode) {
6782 default: return NULL;
Evan Cheng18efe262007-12-14 02:13:44 +00006783 case X86ISD::BSF: return "X86ISD::BSF";
6784 case X86ISD::BSR: return "X86ISD::BSR";
Evan Chenge3413162006-01-09 18:33:28 +00006785 case X86ISD::SHLD: return "X86ISD::SHLD";
6786 case X86ISD::SHRD: return "X86ISD::SHRD";
Evan Chengef6ffb12006-01-31 03:14:29 +00006787 case X86ISD::FAND: return "X86ISD::FAND";
Evan Cheng68c47cb2007-01-05 07:55:56 +00006788 case X86ISD::FOR: return "X86ISD::FOR";
Evan Cheng223547a2006-01-31 22:28:30 +00006789 case X86ISD::FXOR: return "X86ISD::FXOR";
Evan Cheng68c47cb2007-01-05 07:55:56 +00006790 case X86ISD::FSRL: return "X86ISD::FSRL";
Evan Chenga3195e82006-01-12 22:54:21 +00006791 case X86ISD::FILD: return "X86ISD::FILD";
Evan Chenge3de85b2006-02-04 02:20:30 +00006792 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
Evan Cheng72261582005-12-20 06:22:03 +00006793 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
6794 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
6795 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
Evan Chengb077b842005-12-21 02:39:21 +00006796 case X86ISD::FLD: return "X86ISD::FLD";
Evan Chengd90eb7f2006-01-05 00:27:02 +00006797 case X86ISD::FST: return "X86ISD::FST";
Evan Cheng72261582005-12-20 06:22:03 +00006798 case X86ISD::CALL: return "X86ISD::CALL";
6799 case X86ISD::TAILCALL: return "X86ISD::TAILCALL";
6800 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
Dan Gohmanc7a37d42008-12-23 22:45:23 +00006801 case X86ISD::BT: return "X86ISD::BT";
Evan Cheng72261582005-12-20 06:22:03 +00006802 case X86ISD::CMP: return "X86ISD::CMP";
Evan Cheng6be2c582006-04-05 23:38:46 +00006803 case X86ISD::COMI: return "X86ISD::COMI";
6804 case X86ISD::UCOMI: return "X86ISD::UCOMI";
Evan Chengd5781fc2005-12-21 20:21:51 +00006805 case X86ISD::SETCC: return "X86ISD::SETCC";
Evan Cheng72261582005-12-20 06:22:03 +00006806 case X86ISD::CMOV: return "X86ISD::CMOV";
6807 case X86ISD::BRCOND: return "X86ISD::BRCOND";
Evan Chengb077b842005-12-21 02:39:21 +00006808 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
Evan Cheng8df346b2006-03-04 01:12:00 +00006809 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
6810 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
Evan Cheng7ccced62006-02-18 00:15:05 +00006811 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
Evan Cheng020d2e82006-02-23 20:41:18 +00006812 case X86ISD::Wrapper: return "X86ISD::Wrapper";
Nate Begeman14d12ca2008-02-11 04:19:36 +00006813 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
Evan Chengb067a1e2006-03-31 19:22:53 +00006814 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
Nate Begeman14d12ca2008-02-11 04:19:36 +00006815 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
6816 case X86ISD::PINSRB: return "X86ISD::PINSRB";
Evan Cheng653159f2006-03-31 21:55:24 +00006817 case X86ISD::PINSRW: return "X86ISD::PINSRW";
Nate Begemanb9a47b82009-02-23 08:49:38 +00006818 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
Evan Cheng8ca29322006-11-10 21:43:37 +00006819 case X86ISD::FMAX: return "X86ISD::FMAX";
6820 case X86ISD::FMIN: return "X86ISD::FMIN";
Dan Gohman20382522007-07-10 00:05:58 +00006821 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
6822 case X86ISD::FRCP: return "X86ISD::FRCP";
Lauro Ramos Venanciob3a04172007-04-20 21:38:10 +00006823 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
Rafael Espindola094fad32009-04-08 21:14:34 +00006824 case X86ISD::SegmentBaseAddress: return "X86ISD::SegmentBaseAddress";
Anton Korobeynikov2365f512007-07-14 14:06:15 +00006825 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
Arnold Schwaighoferc85e1712007-10-11 19:40:01 +00006826 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
Anton Korobeynikov45b22fa2007-11-16 01:31:51 +00006827 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
Evan Cheng7e2ff772008-05-08 00:57:18 +00006828 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
6829 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
Dale Johannesen48c1bc22008-10-02 18:53:47 +00006830 case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG";
6831 case X86ISD::ATOMSUB64_DAG: return "X86ISD::ATOMSUB64_DAG";
6832 case X86ISD::ATOMOR64_DAG: return "X86ISD::ATOMOR64_DAG";
6833 case X86ISD::ATOMXOR64_DAG: return "X86ISD::ATOMXOR64_DAG";
6834 case X86ISD::ATOMAND64_DAG: return "X86ISD::ATOMAND64_DAG";
6835 case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG";
Evan Chengd880b972008-05-09 21:53:03 +00006836 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
6837 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
Evan Chengf26ffe92008-05-29 08:22:04 +00006838 case X86ISD::VSHL: return "X86ISD::VSHL";
6839 case X86ISD::VSRL: return "X86ISD::VSRL";
Nate Begeman30a0de92008-07-17 16:51:19 +00006840 case X86ISD::CMPPD: return "X86ISD::CMPPD";
6841 case X86ISD::CMPPS: return "X86ISD::CMPPS";
6842 case X86ISD::PCMPEQB: return "X86ISD::PCMPEQB";
6843 case X86ISD::PCMPEQW: return "X86ISD::PCMPEQW";
6844 case X86ISD::PCMPEQD: return "X86ISD::PCMPEQD";
6845 case X86ISD::PCMPEQQ: return "X86ISD::PCMPEQQ";
6846 case X86ISD::PCMPGTB: return "X86ISD::PCMPGTB";
6847 case X86ISD::PCMPGTW: return "X86ISD::PCMPGTW";
6848 case X86ISD::PCMPGTD: return "X86ISD::PCMPGTD";
6849 case X86ISD::PCMPGTQ: return "X86ISD::PCMPGTQ";
Bill Wendlingab55ebd2008-12-12 00:56:36 +00006850 case X86ISD::ADD: return "X86ISD::ADD";
6851 case X86ISD::SUB: return "X86ISD::SUB";
Bill Wendlingd350e022008-12-12 21:15:41 +00006852 case X86ISD::SMUL: return "X86ISD::SMUL";
6853 case X86ISD::UMUL: return "X86ISD::UMUL";
Dan Gohman076aee32009-03-04 19:44:21 +00006854 case X86ISD::INC: return "X86ISD::INC";
6855 case X86ISD::DEC: return "X86ISD::DEC";
Evan Cheng73f24c92009-03-30 21:36:47 +00006856 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
Evan Cheng72261582005-12-20 06:22:03 +00006857 }
6858}
Evan Cheng3a03ebb2005-12-21 23:05:39 +00006859
Chris Lattnerc9addb72007-03-30 23:15:24 +00006860// isLegalAddressingMode - Return true if the addressing mode represented
6861// by AM is legal for this target, for a load/store of the specified type.
Scott Michelfdc40a02009-02-17 22:15:04 +00006862bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
Chris Lattnerc9addb72007-03-30 23:15:24 +00006863 const Type *Ty) const {
6864 // X86 supports extremely general addressing modes.
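  // e.g. "movl 4(%rdi,%rsi,8), %eax" uses Base = %rdi, Index = %rsi, Scale = 8
  // and Disp = 4 in a single memory operand.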
Scott Michelfdc40a02009-02-17 22:15:04 +00006865
Chris Lattnerc9addb72007-03-30 23:15:24 +00006866 // X86 allows a sign-extended 32-bit immediate field as a displacement.
6867 if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1)
6868 return false;
Scott Michelfdc40a02009-02-17 22:15:04 +00006869
Chris Lattnerc9addb72007-03-30 23:15:24 +00006870 if (AM.BaseGV) {
Evan Cheng52787842007-08-01 23:46:47 +00006871 // We can only fold this if we don't need an extra load.
Chris Lattnerc9addb72007-03-30 23:15:24 +00006872 if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false))
6873 return false;
Dale Johannesen203af582008-12-05 21:47:27 +00006874 // If BaseGV requires a register, we cannot also have a BaseReg.
6875 if (Subtarget->GVRequiresRegister(AM.BaseGV, getTargetMachine(), false) &&
6876 AM.HasBaseReg)
6877 return false;
Evan Cheng52787842007-08-01 23:46:47 +00006878
6879 // X86-64 only supports addr of globals in small code model.
6880 if (Subtarget->is64Bit()) {
6881 if (getTargetMachine().getCodeModel() != CodeModel::Small)
6882 return false;
6883 // If lower 4G is not available, then we must use rip-relative addressing.
6884 if (AM.BaseOffs || AM.Scale > 1)
6885 return false;
6886 }
Chris Lattnerc9addb72007-03-30 23:15:24 +00006887 }
Scott Michelfdc40a02009-02-17 22:15:04 +00006888
Chris Lattnerc9addb72007-03-30 23:15:24 +00006889 switch (AM.Scale) {
6890 case 0:
6891 case 1:
6892 case 2:
6893 case 4:
6894 case 8:
6895 // These scales always work.
6896 break;
6897 case 3:
6898 case 5:
6899 case 9:
6900 // These scales are formed with basereg+scalereg. Only accept if there is
6901 // no basereg yet.
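    // A scale of 3, for instance, is encoded as index + 2*index, reusing the
    // index register as the base, which is why no other base register may be present.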
6902 if (AM.HasBaseReg)
6903 return false;
6904 break;
6905 default: // Other stuff never works.
6906 return false;
6907 }
Scott Michelfdc40a02009-02-17 22:15:04 +00006908
Chris Lattnerc9addb72007-03-30 23:15:24 +00006909 return true;
6910}
6911
6912
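// Truncation is modeled as free when it amounts to referencing a narrower
// subregister of the same value (e.g. the low 32 bits of a 64-bit register
// on x86-64), which the two isTruncateFree overloads below encode.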
Evan Cheng2bd122c2007-10-26 01:56:11 +00006913bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const {
6914 if (!Ty1->isInteger() || !Ty2->isInteger())
6915 return false;
Evan Chenge127a732007-10-29 07:57:50 +00006916 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
6917 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
Evan Cheng260e07e2008-03-20 02:18:41 +00006918 if (NumBits1 <= NumBits2)
Evan Chenge127a732007-10-29 07:57:50 +00006919 return false;
6920 return Subtarget->is64Bit() || NumBits1 < 64;
Evan Cheng2bd122c2007-10-26 01:56:11 +00006921}
6922
Duncan Sands83ec4b62008-06-06 12:08:01 +00006923bool X86TargetLowering::isTruncateFree(MVT VT1, MVT VT2) const {
6924 if (!VT1.isInteger() || !VT2.isInteger())
Evan Cheng3c3ddb32007-10-29 19:58:20 +00006925 return false;
Duncan Sands83ec4b62008-06-06 12:08:01 +00006926 unsigned NumBits1 = VT1.getSizeInBits();
6927 unsigned NumBits2 = VT2.getSizeInBits();
Evan Cheng260e07e2008-03-20 02:18:41 +00006928 if (NumBits1 <= NumBits2)
Evan Cheng3c3ddb32007-10-29 19:58:20 +00006929 return false;
6930 return Subtarget->is64Bit() || NumBits1 < 64;
6931}
Evan Cheng2bd122c2007-10-26 01:56:11 +00006932
Dan Gohman97121ba2009-04-08 00:15:30 +00006933bool X86TargetLowering::isZExtFree(const Type *Ty1, const Type *Ty2) const {
Dan Gohman349ba492009-04-09 02:06:09 +00006934 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
Dan Gohman97121ba2009-04-08 00:15:30 +00006935 return Ty1 == Type::Int32Ty && Ty2 == Type::Int64Ty && Subtarget->is64Bit();
6936}
6937
6938bool X86TargetLowering::isZExtFree(MVT VT1, MVT VT2) const {
Dan Gohman349ba492009-04-09 02:06:09 +00006939 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
Dan Gohman97121ba2009-04-08 00:15:30 +00006940 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
6941}
6942
Evan Cheng8b944d32009-05-28 00:35:15 +00006943bool X86TargetLowering::isNarrowingProfitable(MVT VT1, MVT VT2) const {
6944 // i16 instructions are longer (0x66 prefix) and potentially slower.
6945 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
6946}
6947
Evan Cheng60c07e12006-07-05 22:17:51 +00006948/// isShuffleMaskLegal - Targets can use this to indicate that they only
6949/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
6950/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
6951/// are assumed to be legal.
6952bool
Nate Begeman5a5ca152009-04-29 05:20:52 +00006953X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
6954 MVT VT) const {
Evan Cheng60c07e12006-07-05 22:17:51 +00006955 // Only do shuffles on 128-bit vector types for now.
Nate Begeman9008ca62009-04-27 18:41:29 +00006956 if (VT.getSizeInBits() == 64)
6957 return false;
6958
6959 // FIXME: pshufb, blends, palignr, shifts.
6960 return (VT.getVectorNumElements() == 2 ||
6961 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
6962 isMOVLMask(M, VT) ||
6963 isSHUFPMask(M, VT) ||
6964 isPSHUFDMask(M, VT) ||
6965 isPSHUFHWMask(M, VT) ||
6966 isPSHUFLWMask(M, VT) ||
6967 isUNPCKLMask(M, VT) ||
6968 isUNPCKHMask(M, VT) ||
6969 isUNPCKL_v_undef_Mask(M, VT) ||
6970 isUNPCKH_v_undef_Mask(M, VT));
Evan Cheng60c07e12006-07-05 22:17:51 +00006971}
6972
Dan Gohman7d8143f2008-04-09 20:09:42 +00006973bool
Nate Begeman5a5ca152009-04-29 05:20:52 +00006974X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
Nate Begeman9008ca62009-04-27 18:41:29 +00006975 MVT VT) const {
6976 unsigned NumElts = VT.getVectorNumElements();
6977 // FIXME: This collection of masks seems suspect.
6978 if (NumElts == 2)
6979 return true;
6980 if (NumElts == 4 && VT.getSizeInBits() == 128) {
6981 return (isMOVLMask(Mask, VT) ||
6982 isCommutedMOVLMask(Mask, VT, true) ||
6983 isSHUFPMask(Mask, VT) ||
6984 isCommutedSHUFPMask(Mask, VT));
Evan Cheng60c07e12006-07-05 22:17:51 +00006985 }
6986 return false;
6987}
6988
6989//===----------------------------------------------------------------------===//
6990// X86 Scheduler Hooks
6991//===----------------------------------------------------------------------===//
6992
Mon P Wang63307c32008-05-05 19:05:59 +00006993// private utility function
6994MachineBasicBlock *
6995X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
6996 MachineBasicBlock *MBB,
6997 unsigned regOpc,
Andrew Lenharth507a58a2008-06-14 05:48:15 +00006998 unsigned immOpc,
Dale Johannesen140be2d2008-08-19 18:47:28 +00006999 unsigned LoadOpc,
7000 unsigned CXchgOpc,
7001 unsigned copyOpc,
7002 unsigned notOpc,
7003 unsigned EAXreg,
7004 TargetRegisterClass *RC,
Dan Gohman1fdbc1d2009-02-07 16:15:20 +00007005 bool invSrc) const {
Mon P Wang63307c32008-05-05 19:05:59 +00007006 // For the atomic bitwise operator, we generate
7007 // thisMBB:
7008 // newMBB:
Mon P Wangab3e7472008-05-05 22:56:23 +00007009 // ld t1 = [bitinstr.addr]
7010 // op t2 = t1, [bitinstr.val]
7011 // mov EAX = t1
Mon P Wang63307c32008-05-05 19:05:59 +00007012 // lcs dest = [bitinstr.addr], t2 [EAX is implicit]
7013 // bz newMBB
7014 // fallthrough -->nextMBB
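  //   The "bz newMBB" arc is the retry path: the lock cmpxchg fails and loops
  //   whenever another thread modified [bitinstr.addr] between the load and the exchange.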
7015 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
7016 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
Dan Gohman8e5f2c62008-07-07 23:14:23 +00007017 MachineFunction::iterator MBBIter = MBB;
Mon P Wang63307c32008-05-05 19:05:59 +00007018 ++MBBIter;
Scott Michelfdc40a02009-02-17 22:15:04 +00007019
Mon P Wang63307c32008-05-05 19:05:59 +00007020 /// First build the CFG
7021 MachineFunction *F = MBB->getParent();
7022 MachineBasicBlock *thisMBB = MBB;
Dan Gohman8e5f2c62008-07-07 23:14:23 +00007023 MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
7024 MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
7025 F->insert(MBBIter, newMBB);
7026 F->insert(MBBIter, nextMBB);
Scott Michelfdc40a02009-02-17 22:15:04 +00007027
Mon P Wang63307c32008-05-05 19:05:59 +00007028   // Move all successors of thisMBB to nextMBB
7029 nextMBB->transferSuccessors(thisMBB);
Scott Michelfdc40a02009-02-17 22:15:04 +00007030
Mon P Wang63307c32008-05-05 19:05:59 +00007031 // Update thisMBB to fall through to newMBB
7032 thisMBB->addSuccessor(newMBB);
Scott Michelfdc40a02009-02-17 22:15:04 +00007033
Mon P Wang63307c32008-05-05 19:05:59 +00007034   // newMBB jumps to itself and falls through to nextMBB
7035 newMBB->addSuccessor(nextMBB);
7036 newMBB->addSuccessor(newMBB);
Scott Michelfdc40a02009-02-17 22:15:04 +00007037
Mon P Wang63307c32008-05-05 19:05:59 +00007038 // Insert instructions into newMBB based on incoming instruction
Rafael Espindolaa82dfca2009-03-27 15:26:30 +00007039 assert(bInstr->getNumOperands() < X86AddrNumOperands + 4 &&
Bill Wendling51b16f42009-05-30 01:09:53 +00007040 "unexpected number of operands");
Dale Johannesene4d209d2009-02-03 20:21:25 +00007041 DebugLoc dl = bInstr->getDebugLoc();
Mon P Wang63307c32008-05-05 19:05:59 +00007042 MachineOperand& destOper = bInstr->getOperand(0);
Rafael Espindolaa82dfca2009-03-27 15:26:30 +00007043 MachineOperand* argOpers[2 + X86AddrNumOperands];
Mon P Wang63307c32008-05-05 19:05:59 +00007044 int numArgs = bInstr->getNumOperands() - 1;
7045 for (int i=0; i < numArgs; ++i)
7046 argOpers[i] = &bInstr->getOperand(i+1);
7047
7049   // An x86 memory address has 5 operands: base, scale, index, displacement, and segment
Rafael Espindolaa82dfca2009-03-27 15:26:30 +00007049   int lastAddrIndx = X86AddrNumOperands - 1; // [0,4]
7050 int valArgIndx = lastAddrIndx + 1;
Scott Michelfdc40a02009-02-17 22:15:04 +00007051
Dale Johannesen140be2d2008-08-19 18:47:28 +00007052 unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
Dale Johannesene4d209d2009-02-03 20:21:25 +00007053 MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(LoadOpc), t1);
Mon P Wang63307c32008-05-05 19:05:59 +00007054 for (int i=0; i <= lastAddrIndx; ++i)
7055 (*MIB).addOperand(*argOpers[i]);
Andrew Lenharth507a58a2008-06-14 05:48:15 +00007056
Dale Johannesen140be2d2008-08-19 18:47:28 +00007057 unsigned tt = F->getRegInfo().createVirtualRegister(RC);
Andrew Lenharth507a58a2008-06-14 05:48:15 +00007058 if (invSrc) {
Dale Johannesene4d209d2009-02-03 20:21:25 +00007059 MIB = BuildMI(newMBB, dl, TII->get(notOpc), tt).addReg(t1);
Andrew Lenharth507a58a2008-06-14 05:48:15 +00007060 }
Scott Michelfdc40a02009-02-17 22:15:04 +00007061 else
Andrew Lenharth507a58a2008-06-14 05:48:15 +00007062 tt = t1;
7063
Dale Johannesen140be2d2008-08-19 18:47:28 +00007064 unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
Dan Gohmand735b802008-10-03 15:45:36 +00007065 assert((argOpers[valArgIndx]->isReg() ||
7066 argOpers[valArgIndx]->isImm()) &&
Dan Gohman014278e2008-09-13 17:58:21 +00007067 "invalid operand");
Dan Gohmand735b802008-10-03 15:45:36 +00007068 if (argOpers[valArgIndx]->isReg())
Dale Johannesene4d209d2009-02-03 20:21:25 +00007069 MIB = BuildMI(newMBB, dl, TII->get(regOpc), t2);
Mon P Wang63307c32008-05-05 19:05:59 +00007070 else
Dale Johannesene4d209d2009-02-03 20:21:25 +00007071 MIB = BuildMI(newMBB, dl, TII->get(immOpc), t2);
Andrew Lenharth507a58a2008-06-14 05:48:15 +00007072 MIB.addReg(tt);
Mon P Wang63307c32008-05-05 19:05:59 +00007073 (*MIB).addOperand(*argOpers[valArgIndx]);
Andrew Lenharth507a58a2008-06-14 05:48:15 +00007074
Dale Johannesene4d209d2009-02-03 20:21:25 +00007075 MIB = BuildMI(newMBB, dl, TII->get(copyOpc), EAXreg);
Mon P Wangab3e7472008-05-05 22:56:23 +00007076 MIB.addReg(t1);
Scott Michelfdc40a02009-02-17 22:15:04 +00007077
Dale Johannesene4d209d2009-02-03 20:21:25 +00007078 MIB = BuildMI(newMBB, dl, TII->get(CXchgOpc));
Mon P Wang63307c32008-05-05 19:05:59 +00007079 for (int i=0; i <= lastAddrIndx; ++i)
7080 (*MIB).addOperand(*argOpers[i]);
7081 MIB.addReg(t2);
Mon P Wangf5952662008-07-17 04:54:06 +00007082 assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand");
7083 (*MIB).addMemOperand(*F, *bInstr->memoperands_begin());
7084
Dale Johannesene4d209d2009-02-03 20:21:25 +00007085 MIB = BuildMI(newMBB, dl, TII->get(copyOpc), destOper.getReg());
Dale Johannesen140be2d2008-08-19 18:47:28 +00007086 MIB.addReg(EAXreg);
Scott Michelfdc40a02009-02-17 22:15:04 +00007087
Mon P Wang63307c32008-05-05 19:05:59 +00007088 // insert branch
Dale Johannesene4d209d2009-02-03 20:21:25 +00007089 BuildMI(newMBB, dl, TII->get(X86::JNE)).addMBB(newMBB);
Mon P Wang63307c32008-05-05 19:05:59 +00007090
Dan Gohman8e5f2c62008-07-07 23:14:23 +00007091 F->DeleteMachineInstr(bInstr); // The pseudo instruction is gone now.
Mon P Wang63307c32008-05-05 19:05:59 +00007092 return nextMBB;
7093}
7094
Dale Johannesen1b54c7f2008-10-03 19:41:08 +00007095// private utility function: 64 bit atomics on 32 bit host.
Mon P Wang63307c32008-05-05 19:05:59 +00007096MachineBasicBlock *
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007097X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
7098 MachineBasicBlock *MBB,
7099 unsigned regOpcL,
7100 unsigned regOpcH,
7101 unsigned immOpcL,
7102 unsigned immOpcH,
Dan Gohman1fdbc1d2009-02-07 16:15:20 +00007103 bool invSrc) const {
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007104 // For the atomic bitwise operator, we generate
7105 // thisMBB (instructions are in pairs, except cmpxchg8b)
7106 // ld t1,t2 = [bitinstr.addr]
7107 // newMBB:
7108 // out1, out2 = phi (thisMBB, t1/t2) (newMBB, t3/t4)
7109 // op t5, t6 <- out1, out2, [bitinstr.val]
Dale Johannesen880ae362008-10-03 22:25:52 +00007110 // (for SWAP, substitute: mov t5, t6 <- [bitinstr.val])
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007111 // mov ECX, EBX <- t5, t6
7112 // mov EAX, EDX <- t1, t2
7113 // cmpxchg8b [bitinstr.addr] [EAX, EDX, EBX, ECX implicit]
7114 // mov t3, t4 <- EAX, EDX
7115 // bz newMBB
7116 // result in out1, out2
7117 // fallthrough -->nextMBB
7118
7119 const TargetRegisterClass *RC = X86::GR32RegisterClass;
7120 const unsigned LoadOpc = X86::MOV32rm;
7121 const unsigned copyOpc = X86::MOV32rr;
7122 const unsigned NotOpc = X86::NOT32r;
7123 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
7124 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
7125 MachineFunction::iterator MBBIter = MBB;
7126 ++MBBIter;
Scott Michelfdc40a02009-02-17 22:15:04 +00007127
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007128 /// First build the CFG
7129 MachineFunction *F = MBB->getParent();
7130 MachineBasicBlock *thisMBB = MBB;
7131 MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
7132 MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
7133 F->insert(MBBIter, newMBB);
7134 F->insert(MBBIter, nextMBB);
Scott Michelfdc40a02009-02-17 22:15:04 +00007135
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007136   // Move all successors of thisMBB to nextMBB
7137 nextMBB->transferSuccessors(thisMBB);
Scott Michelfdc40a02009-02-17 22:15:04 +00007138
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007139 // Update thisMBB to fall through to newMBB
7140 thisMBB->addSuccessor(newMBB);
Scott Michelfdc40a02009-02-17 22:15:04 +00007141
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007142   // newMBB jumps to itself and falls through to nextMBB
7143 newMBB->addSuccessor(nextMBB);
7144 newMBB->addSuccessor(newMBB);
Scott Michelfdc40a02009-02-17 22:15:04 +00007145
Dale Johannesene4d209d2009-02-03 20:21:25 +00007146 DebugLoc dl = bInstr->getDebugLoc();
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007147 // Insert instructions into newMBB based on incoming instruction
7148 // There are 8 "real" operands plus 9 implicit def/uses, ignored here.
Rafael Espindolaa82dfca2009-03-27 15:26:30 +00007149 assert(bInstr->getNumOperands() < X86AddrNumOperands + 14 &&
Bill Wendling51b16f42009-05-30 01:09:53 +00007150 "unexpected number of operands");
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007151 MachineOperand& dest1Oper = bInstr->getOperand(0);
7152 MachineOperand& dest2Oper = bInstr->getOperand(1);
Rafael Espindolaa82dfca2009-03-27 15:26:30 +00007153 MachineOperand* argOpers[2 + X86AddrNumOperands];
7154 for (int i=0; i < 2 + X86AddrNumOperands; ++i)
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007155 argOpers[i] = &bInstr->getOperand(i+2);
7156
7157   // An x86 memory address has 5 operands: base, scale, index, displacement, and segment
Rafael Espindolaa82dfca2009-03-27 15:26:30 +00007158   int lastAddrIndx = X86AddrNumOperands - 1; // [0,4]
Scott Michelfdc40a02009-02-17 22:15:04 +00007159
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007160 unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
Dale Johannesene4d209d2009-02-03 20:21:25 +00007161 MachineInstrBuilder MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t1);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007162 for (int i=0; i <= lastAddrIndx; ++i)
7163 (*MIB).addOperand(*argOpers[i]);
7164 unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
Dale Johannesene4d209d2009-02-03 20:21:25 +00007165 MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t2);
Dale Johannesen880ae362008-10-03 22:25:52 +00007166   // Add 4 to the displacement so this second load reads the high 32-bit half at [addr+4].
Rafael Espindola094fad32009-04-08 21:14:34 +00007167 for (int i=0; i <= lastAddrIndx-2; ++i)
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007168 (*MIB).addOperand(*argOpers[i]);
Dale Johannesen880ae362008-10-03 22:25:52 +00007169 MachineOperand newOp3 = *(argOpers[3]);
7170 if (newOp3.isImm())
7171 newOp3.setImm(newOp3.getImm()+4);
7172 else
7173 newOp3.setOffset(newOp3.getOffset()+4);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007174 (*MIB).addOperand(newOp3);
Rafael Espindola094fad32009-04-08 21:14:34 +00007175 (*MIB).addOperand(*argOpers[lastAddrIndx]);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007176
7177 // t3/4 are defined later, at the bottom of the loop
7178 unsigned t3 = F->getRegInfo().createVirtualRegister(RC);
7179 unsigned t4 = F->getRegInfo().createVirtualRegister(RC);
Dale Johannesene4d209d2009-02-03 20:21:25 +00007180 BuildMI(newMBB, dl, TII->get(X86::PHI), dest1Oper.getReg())
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007181 .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(newMBB);
Dale Johannesene4d209d2009-02-03 20:21:25 +00007182 BuildMI(newMBB, dl, TII->get(X86::PHI), dest2Oper.getReg())
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007183 .addReg(t2).addMBB(thisMBB).addReg(t4).addMBB(newMBB);
7184
7185 unsigned tt1 = F->getRegInfo().createVirtualRegister(RC);
7186 unsigned tt2 = F->getRegInfo().createVirtualRegister(RC);
Scott Michelfdc40a02009-02-17 22:15:04 +00007187 if (invSrc) {
Dale Johannesene4d209d2009-02-03 20:21:25 +00007188 MIB = BuildMI(newMBB, dl, TII->get(NotOpc), tt1).addReg(t1);
7189 MIB = BuildMI(newMBB, dl, TII->get(NotOpc), tt2).addReg(t2);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007190 } else {
7191 tt1 = t1;
7192 tt2 = t2;
7193 }
7194
Rafael Espindolaa82dfca2009-03-27 15:26:30 +00007195 int valArgIndx = lastAddrIndx + 1;
7196 assert((argOpers[valArgIndx]->isReg() ||
Bill Wendling51b16f42009-05-30 01:09:53 +00007197 argOpers[valArgIndx]->isImm()) &&
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007198 "invalid operand");
7199 unsigned t5 = F->getRegInfo().createVirtualRegister(RC);
7200 unsigned t6 = F->getRegInfo().createVirtualRegister(RC);
Rafael Espindolaa82dfca2009-03-27 15:26:30 +00007201 if (argOpers[valArgIndx]->isReg())
Dale Johannesene4d209d2009-02-03 20:21:25 +00007202 MIB = BuildMI(newMBB, dl, TII->get(regOpcL), t5);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007203 else
Dale Johannesene4d209d2009-02-03 20:21:25 +00007204 MIB = BuildMI(newMBB, dl, TII->get(immOpcL), t5);
Dale Johannesen880ae362008-10-03 22:25:52 +00007205 if (regOpcL != X86::MOV32rr)
7206 MIB.addReg(tt1);
Rafael Espindolaa82dfca2009-03-27 15:26:30 +00007207 (*MIB).addOperand(*argOpers[valArgIndx]);
7208 assert(argOpers[valArgIndx + 1]->isReg() ==
Bill Wendling51b16f42009-05-30 01:09:53 +00007209 argOpers[valArgIndx]->isReg());
Rafael Espindolaa82dfca2009-03-27 15:26:30 +00007210 assert(argOpers[valArgIndx + 1]->isImm() ==
Bill Wendling51b16f42009-05-30 01:09:53 +00007211 argOpers[valArgIndx]->isImm());
Rafael Espindolaa82dfca2009-03-27 15:26:30 +00007212 if (argOpers[valArgIndx + 1]->isReg())
Dale Johannesene4d209d2009-02-03 20:21:25 +00007213 MIB = BuildMI(newMBB, dl, TII->get(regOpcH), t6);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007214 else
Dale Johannesene4d209d2009-02-03 20:21:25 +00007215 MIB = BuildMI(newMBB, dl, TII->get(immOpcH), t6);
Dale Johannesen880ae362008-10-03 22:25:52 +00007216 if (regOpcH != X86::MOV32rr)
7217 MIB.addReg(tt2);
Rafael Espindolaa82dfca2009-03-27 15:26:30 +00007218 (*MIB).addOperand(*argOpers[valArgIndx + 1]);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007219
Dale Johannesene4d209d2009-02-03 20:21:25 +00007220 MIB = BuildMI(newMBB, dl, TII->get(copyOpc), X86::EAX);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007221 MIB.addReg(t1);
Dale Johannesene4d209d2009-02-03 20:21:25 +00007222 MIB = BuildMI(newMBB, dl, TII->get(copyOpc), X86::EDX);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007223 MIB.addReg(t2);
7224
Dale Johannesene4d209d2009-02-03 20:21:25 +00007225 MIB = BuildMI(newMBB, dl, TII->get(copyOpc), X86::EBX);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007226 MIB.addReg(t5);
Dale Johannesene4d209d2009-02-03 20:21:25 +00007227 MIB = BuildMI(newMBB, dl, TII->get(copyOpc), X86::ECX);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007228 MIB.addReg(t6);
Scott Michelfdc40a02009-02-17 22:15:04 +00007229
Dale Johannesene4d209d2009-02-03 20:21:25 +00007230 MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG8B));
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007231 for (int i=0; i <= lastAddrIndx; ++i)
7232 (*MIB).addOperand(*argOpers[i]);
7233
7234 assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand");
7235 (*MIB).addMemOperand(*F, *bInstr->memoperands_begin());
7236
Dale Johannesene4d209d2009-02-03 20:21:25 +00007237 MIB = BuildMI(newMBB, dl, TII->get(copyOpc), t3);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007238 MIB.addReg(X86::EAX);
Dale Johannesene4d209d2009-02-03 20:21:25 +00007239 MIB = BuildMI(newMBB, dl, TII->get(copyOpc), t4);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007240 MIB.addReg(X86::EDX);
Scott Michelfdc40a02009-02-17 22:15:04 +00007241
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007242 // insert branch
Dale Johannesene4d209d2009-02-03 20:21:25 +00007243 BuildMI(newMBB, dl, TII->get(X86::JNE)).addMBB(newMBB);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007244
7245 F->DeleteMachineInstr(bInstr); // The pseudo instruction is gone now.
7246 return nextMBB;
7247}
7248
7249// private utility function
7250MachineBasicBlock *
Mon P Wang63307c32008-05-05 19:05:59 +00007251X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
7252 MachineBasicBlock *MBB,
Dan Gohman1fdbc1d2009-02-07 16:15:20 +00007253 unsigned cmovOpc) const {
Mon P Wang63307c32008-05-05 19:05:59 +00007254 // For the atomic min/max operator, we generate
7255 // thisMBB:
7256 // newMBB:
Mon P Wangab3e7472008-05-05 22:56:23 +00007257 // ld t1 = [min/max.addr]
Scott Michelfdc40a02009-02-17 22:15:04 +00007258 // mov t2 = [min/max.val]
Mon P Wang63307c32008-05-05 19:05:59 +00007259 // cmp t1, t2
7260 // cmov[cond] t2 = t1
Mon P Wangab3e7472008-05-05 22:56:23 +00007261 // mov EAX = t1
Mon P Wang63307c32008-05-05 19:05:59 +00007262 // lcs dest = [bitinstr.addr], t2 [EAX is implicit]
7263 // bz newMBB
7264 // fallthrough -->nextMBB
7265 //
7266 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
7267 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
Dan Gohman8e5f2c62008-07-07 23:14:23 +00007268 MachineFunction::iterator MBBIter = MBB;
Mon P Wang63307c32008-05-05 19:05:59 +00007269 ++MBBIter;
Scott Michelfdc40a02009-02-17 22:15:04 +00007270
Mon P Wang63307c32008-05-05 19:05:59 +00007271 /// First build the CFG
7272 MachineFunction *F = MBB->getParent();
7273 MachineBasicBlock *thisMBB = MBB;
Dan Gohman8e5f2c62008-07-07 23:14:23 +00007274 MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
7275 MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
7276 F->insert(MBBIter, newMBB);
7277 F->insert(MBBIter, nextMBB);
Scott Michelfdc40a02009-02-17 22:15:04 +00007278
Mon P Wang63307c32008-05-05 19:05:59 +00007279   // Move all successors of thisMBB to nextMBB
7280 nextMBB->transferSuccessors(thisMBB);
Scott Michelfdc40a02009-02-17 22:15:04 +00007281
Mon P Wang63307c32008-05-05 19:05:59 +00007282 // Update thisMBB to fall through to newMBB
7283 thisMBB->addSuccessor(newMBB);
Scott Michelfdc40a02009-02-17 22:15:04 +00007284
Mon P Wang63307c32008-05-05 19:05:59 +00007285   // newMBB jumps to itself and falls through to nextMBB
7286 newMBB->addSuccessor(nextMBB);
7287 newMBB->addSuccessor(newMBB);
Scott Michelfdc40a02009-02-17 22:15:04 +00007288
Dale Johannesene4d209d2009-02-03 20:21:25 +00007289 DebugLoc dl = mInstr->getDebugLoc();
Mon P Wang63307c32008-05-05 19:05:59 +00007290 // Insert instructions into newMBB based on incoming instruction
Rafael Espindolaa82dfca2009-03-27 15:26:30 +00007291 assert(mInstr->getNumOperands() < X86AddrNumOperands + 4 &&
Bill Wendling51b16f42009-05-30 01:09:53 +00007292 "unexpected number of operands");
Mon P Wang63307c32008-05-05 19:05:59 +00007293 MachineOperand& destOper = mInstr->getOperand(0);
Rafael Espindolaa82dfca2009-03-27 15:26:30 +00007294 MachineOperand* argOpers[2 + X86AddrNumOperands];
Mon P Wang63307c32008-05-05 19:05:59 +00007295 int numArgs = mInstr->getNumOperands() - 1;
7296 for (int i=0; i < numArgs; ++i)
7297 argOpers[i] = &mInstr->getOperand(i+1);
Scott Michelfdc40a02009-02-17 22:15:04 +00007298
Mon P Wang63307c32008-05-05 19:05:59 +00007299   // An x86 memory address has 5 operands: base, scale, index, displacement, and segment
Rafael Espindolaa82dfca2009-03-27 15:26:30 +00007300   int lastAddrIndx = X86AddrNumOperands - 1; // [0,4]
7301 int valArgIndx = lastAddrIndx + 1;
Scott Michelfdc40a02009-02-17 22:15:04 +00007302
Mon P Wangab3e7472008-05-05 22:56:23 +00007303 unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
Dale Johannesene4d209d2009-02-03 20:21:25 +00007304 MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rm), t1);
Mon P Wang63307c32008-05-05 19:05:59 +00007305 for (int i=0; i <= lastAddrIndx; ++i)
7306 (*MIB).addOperand(*argOpers[i]);
Mon P Wangab3e7472008-05-05 22:56:23 +00007307
Mon P Wang63307c32008-05-05 19:05:59 +00007308 // We only support register and immediate values
Dan Gohmand735b802008-10-03 15:45:36 +00007309 assert((argOpers[valArgIndx]->isReg() ||
7310 argOpers[valArgIndx]->isImm()) &&
Dan Gohman014278e2008-09-13 17:58:21 +00007311 "invalid operand");
Scott Michelfdc40a02009-02-17 22:15:04 +00007312
7313 unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
Dan Gohmand735b802008-10-03 15:45:36 +00007314 if (argOpers[valArgIndx]->isReg())
Dale Johannesene4d209d2009-02-03 20:21:25 +00007315 MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), t2);
Scott Michelfdc40a02009-02-17 22:15:04 +00007316 else
Dale Johannesene4d209d2009-02-03 20:21:25 +00007317     MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32ri), t2); // immediate form for constant operands
Mon P Wang63307c32008-05-05 19:05:59 +00007318 (*MIB).addOperand(*argOpers[valArgIndx]);
7319
Dale Johannesene4d209d2009-02-03 20:21:25 +00007320 MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), X86::EAX);
Mon P Wangab3e7472008-05-05 22:56:23 +00007321 MIB.addReg(t1);
7322
Dale Johannesene4d209d2009-02-03 20:21:25 +00007323 MIB = BuildMI(newMBB, dl, TII->get(X86::CMP32rr));
Mon P Wang63307c32008-05-05 19:05:59 +00007324 MIB.addReg(t1);
7325 MIB.addReg(t2);
7326
7327 // Generate movc
7328 unsigned t3 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
Dale Johannesene4d209d2009-02-03 20:21:25 +00007329 MIB = BuildMI(newMBB, dl, TII->get(cmovOpc),t3);
Mon P Wang63307c32008-05-05 19:05:59 +00007330 MIB.addReg(t2);
7331 MIB.addReg(t1);
7332
7333 // Cmp and exchange if none has modified the memory location
Dale Johannesene4d209d2009-02-03 20:21:25 +00007334 MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG32));
Mon P Wang63307c32008-05-05 19:05:59 +00007335 for (int i=0; i <= lastAddrIndx; ++i)
7336 (*MIB).addOperand(*argOpers[i]);
7337 MIB.addReg(t3);
Mon P Wangf5952662008-07-17 04:54:06 +00007338 assert(mInstr->hasOneMemOperand() && "Unexpected number of memoperand");
7339 (*MIB).addMemOperand(*F, *mInstr->memoperands_begin());
Scott Michelfdc40a02009-02-17 22:15:04 +00007340
Dale Johannesene4d209d2009-02-03 20:21:25 +00007341 MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), destOper.getReg());
Mon P Wang63307c32008-05-05 19:05:59 +00007342 MIB.addReg(X86::EAX);
Scott Michelfdc40a02009-02-17 22:15:04 +00007343
Mon P Wang63307c32008-05-05 19:05:59 +00007344 // insert branch
Dale Johannesene4d209d2009-02-03 20:21:25 +00007345 BuildMI(newMBB, dl, TII->get(X86::JNE)).addMBB(newMBB);
Mon P Wang63307c32008-05-05 19:05:59 +00007346
Dan Gohman8e5f2c62008-07-07 23:14:23 +00007347 F->DeleteMachineInstr(mInstr); // The pseudo instruction is gone now.
Mon P Wang63307c32008-05-05 19:05:59 +00007348 return nextMBB;
7349}
7350
7351
Evan Cheng60c07e12006-07-05 22:17:51 +00007352MachineBasicBlock *
Evan Chengff9b3732008-01-30 18:18:23 +00007353X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
Dan Gohman1fdbc1d2009-02-07 16:15:20 +00007354 MachineBasicBlock *BB) const {
Dale Johannesene4d209d2009-02-03 20:21:25 +00007355 DebugLoc dl = MI->getDebugLoc();
Evan Chengc0f64ff2006-11-27 23:37:22 +00007356 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
Evan Cheng60c07e12006-07-05 22:17:51 +00007357 switch (MI->getOpcode()) {
7358 default: assert(false && "Unexpected instr type to insert");
Mon P Wang9e5ecb82008-12-12 01:25:51 +00007359 case X86::CMOV_V1I64:
Evan Cheng60c07e12006-07-05 22:17:51 +00007360 case X86::CMOV_FR32:
7361 case X86::CMOV_FR64:
7362 case X86::CMOV_V4F32:
7363 case X86::CMOV_V2F64:
Evan Chenge5f62042007-09-29 00:00:36 +00007364 case X86::CMOV_V2I64: {
Evan Cheng60c07e12006-07-05 22:17:51 +00007365 // To "insert" a SELECT_CC instruction, we actually have to insert the
7366 // diamond control-flow pattern. The incoming instruction knows the
7367 // destination vreg to set, the condition code register to branch on, the
7368 // true/false values to select between, and a branch opcode to use.
7369 const BasicBlock *LLVM_BB = BB->getBasicBlock();
Dan Gohman8e5f2c62008-07-07 23:14:23 +00007370 MachineFunction::iterator It = BB;
Evan Cheng60c07e12006-07-05 22:17:51 +00007371 ++It;
Anton Korobeynikov12c49af2006-11-21 00:01:06 +00007372
Evan Cheng60c07e12006-07-05 22:17:51 +00007373 // thisMBB:
7374 // ...
7375 // TrueVal = ...
7376 // cmpTY ccX, r1, r2
7377 // bCC copy1MBB
7378 // fallthrough --> copy0MBB
7379 MachineBasicBlock *thisMBB = BB;
Dan Gohman8e5f2c62008-07-07 23:14:23 +00007380 MachineFunction *F = BB->getParent();
7381 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
7382 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
Anton Korobeynikov12c49af2006-11-21 00:01:06 +00007383 unsigned Opc =
Chris Lattner7fbe9722006-10-20 17:42:20 +00007384 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
Dale Johannesene4d209d2009-02-03 20:21:25 +00007385 BuildMI(BB, dl, TII->get(Opc)).addMBB(sinkMBB);
Dan Gohman8e5f2c62008-07-07 23:14:23 +00007386 F->insert(It, copy0MBB);
7387 F->insert(It, sinkMBB);
Mon P Wang63307c32008-05-05 19:05:59 +00007388 // Update machine-CFG edges by transferring all successors of the current
Evan Cheng60c07e12006-07-05 22:17:51 +00007389 // block to the new block which will contain the Phi node for the select.
Mon P Wang63307c32008-05-05 19:05:59 +00007390 sinkMBB->transferSuccessors(BB);
7391
7392 // Add the true and fallthrough blocks as its successors.
Evan Cheng60c07e12006-07-05 22:17:51 +00007393 BB->addSuccessor(copy0MBB);
7394 BB->addSuccessor(sinkMBB);
Anton Korobeynikov12c49af2006-11-21 00:01:06 +00007395
Evan Cheng60c07e12006-07-05 22:17:51 +00007396 // copy0MBB:
7397 // %FalseValue = ...
7398 // # fallthrough to sinkMBB
7399 BB = copy0MBB;
Anton Korobeynikov12c49af2006-11-21 00:01:06 +00007400
Evan Cheng60c07e12006-07-05 22:17:51 +00007401 // Update machine-CFG edges
7402 BB->addSuccessor(sinkMBB);
Anton Korobeynikov12c49af2006-11-21 00:01:06 +00007403
Evan Cheng60c07e12006-07-05 22:17:51 +00007404 // sinkMBB:
7405 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
7406 // ...
7407 BB = sinkMBB;
Dale Johannesene4d209d2009-02-03 20:21:25 +00007408 BuildMI(BB, dl, TII->get(X86::PHI), MI->getOperand(0).getReg())
Evan Cheng60c07e12006-07-05 22:17:51 +00007409 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
7410 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
7411
Dan Gohman8e5f2c62008-07-07 23:14:23 +00007412 F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
Evan Cheng60c07e12006-07-05 22:17:51 +00007413 return BB;
7414 }
7415
Dale Johannesen849f2142007-07-03 00:53:03 +00007416 case X86::FP32_TO_INT16_IN_MEM:
7417 case X86::FP32_TO_INT32_IN_MEM:
7418 case X86::FP32_TO_INT64_IN_MEM:
7419 case X86::FP64_TO_INT16_IN_MEM:
7420 case X86::FP64_TO_INT32_IN_MEM:
Dale Johannesena996d522007-08-07 01:17:37 +00007421 case X86::FP64_TO_INT64_IN_MEM:
7422 case X86::FP80_TO_INT16_IN_MEM:
7423 case X86::FP80_TO_INT32_IN_MEM:
7424 case X86::FP80_TO_INT64_IN_MEM: {
Evan Cheng60c07e12006-07-05 22:17:51 +00007425 // Change the floating point control register to use "round towards zero"
7426 // mode when truncating to an integer value.
7427 MachineFunction *F = BB->getParent();
7428 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
Dale Johannesene4d209d2009-02-03 20:21:25 +00007429 addFrameReference(BuildMI(BB, dl, TII->get(X86::FNSTCW16m)), CWFrameIdx);
Evan Cheng60c07e12006-07-05 22:17:51 +00007430
7431 // Load the old value of the high byte of the control word...
7432 unsigned OldCW =
Chris Lattner84bc5422007-12-31 04:13:23 +00007433 F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass);
Scott Michelfdc40a02009-02-17 22:15:04 +00007434 addFrameReference(BuildMI(BB, dl, TII->get(X86::MOV16rm), OldCW),
Dale Johannesene4d209d2009-02-03 20:21:25 +00007435 CWFrameIdx);
Evan Cheng60c07e12006-07-05 22:17:51 +00007436
7437     // Set rounding to "round toward zero" (0xC7F: RC bits 10-11 = 11b, all FP exceptions masked)...
Dale Johannesene4d209d2009-02-03 20:21:25 +00007438 addFrameReference(BuildMI(BB, dl, TII->get(X86::MOV16mi)), CWFrameIdx)
Evan Chengc0f64ff2006-11-27 23:37:22 +00007439 .addImm(0xC7F);
Evan Cheng60c07e12006-07-05 22:17:51 +00007440
7441 // Reload the modified control word now...
Dale Johannesene4d209d2009-02-03 20:21:25 +00007442 addFrameReference(BuildMI(BB, dl, TII->get(X86::FLDCW16m)), CWFrameIdx);
Evan Cheng60c07e12006-07-05 22:17:51 +00007443
7444 // Restore the memory image of control word to original value
Dale Johannesene4d209d2009-02-03 20:21:25 +00007445 addFrameReference(BuildMI(BB, dl, TII->get(X86::MOV16mr)), CWFrameIdx)
Evan Chengc0f64ff2006-11-27 23:37:22 +00007446 .addReg(OldCW);
Evan Cheng60c07e12006-07-05 22:17:51 +00007447
7448 // Get the X86 opcode to use.
7449 unsigned Opc;
7450 switch (MI->getOpcode()) {
7451 default: assert(0 && "illegal opcode!");
Dale Johannesene377d4d2007-07-04 21:07:47 +00007452 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
7453 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
7454 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
7455 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
7456 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
7457 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
Dale Johannesena996d522007-08-07 01:17:37 +00007458 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
7459 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
7460 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
Evan Cheng60c07e12006-07-05 22:17:51 +00007461 }
7462
7463 X86AddressMode AM;
7464 MachineOperand &Op = MI->getOperand(0);
Dan Gohmand735b802008-10-03 15:45:36 +00007465 if (Op.isReg()) {
Evan Cheng60c07e12006-07-05 22:17:51 +00007466 AM.BaseType = X86AddressMode::RegBase;
7467 AM.Base.Reg = Op.getReg();
7468 } else {
7469 AM.BaseType = X86AddressMode::FrameIndexBase;
Chris Lattner8aa797a2007-12-30 23:10:15 +00007470 AM.Base.FrameIndex = Op.getIndex();
Evan Cheng60c07e12006-07-05 22:17:51 +00007471 }
7472 Op = MI->getOperand(1);
Dan Gohmand735b802008-10-03 15:45:36 +00007473 if (Op.isImm())
Chris Lattner7fbe9722006-10-20 17:42:20 +00007474 AM.Scale = Op.getImm();
Evan Cheng60c07e12006-07-05 22:17:51 +00007475 Op = MI->getOperand(2);
Dan Gohmand735b802008-10-03 15:45:36 +00007476 if (Op.isImm())
Chris Lattner7fbe9722006-10-20 17:42:20 +00007477 AM.IndexReg = Op.getImm();
Evan Cheng60c07e12006-07-05 22:17:51 +00007478 Op = MI->getOperand(3);
Dan Gohmand735b802008-10-03 15:45:36 +00007479 if (Op.isGlobal()) {
Evan Cheng60c07e12006-07-05 22:17:51 +00007480 AM.GV = Op.getGlobal();
7481 } else {
Chris Lattner7fbe9722006-10-20 17:42:20 +00007482 AM.Disp = Op.getImm();
Evan Cheng60c07e12006-07-05 22:17:51 +00007483 }
Dale Johannesene4d209d2009-02-03 20:21:25 +00007484 addFullAddress(BuildMI(BB, dl, TII->get(Opc)), AM)
Rafael Espindola8ef2b892009-04-08 08:09:33 +00007485 .addReg(MI->getOperand(X86AddrNumOperands).getReg());
Evan Cheng60c07e12006-07-05 22:17:51 +00007486
7487 // Reload the original control word now.
Dale Johannesene4d209d2009-02-03 20:21:25 +00007488 addFrameReference(BuildMI(BB, dl, TII->get(X86::FLDCW16m)), CWFrameIdx);
Evan Cheng60c07e12006-07-05 22:17:51 +00007489
Dan Gohman8e5f2c62008-07-07 23:14:23 +00007490 F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
Evan Cheng60c07e12006-07-05 22:17:51 +00007491 return BB;
7492 }
Mon P Wang63307c32008-05-05 19:05:59 +00007493 case X86::ATOMAND32:
7494 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
Scott Michelfdc40a02009-02-17 22:15:04 +00007495 X86::AND32ri, X86::MOV32rm,
Dale Johannesen140be2d2008-08-19 18:47:28 +00007496 X86::LCMPXCHG32, X86::MOV32rr,
7497 X86::NOT32r, X86::EAX,
7498 X86::GR32RegisterClass);
Mon P Wang63307c32008-05-05 19:05:59 +00007499 case X86::ATOMOR32:
Scott Michelfdc40a02009-02-17 22:15:04 +00007500 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr,
7501 X86::OR32ri, X86::MOV32rm,
Dale Johannesen140be2d2008-08-19 18:47:28 +00007502 X86::LCMPXCHG32, X86::MOV32rr,
7503 X86::NOT32r, X86::EAX,
7504 X86::GR32RegisterClass);
Mon P Wang63307c32008-05-05 19:05:59 +00007505 case X86::ATOMXOR32:
7506 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr,
Scott Michelfdc40a02009-02-17 22:15:04 +00007507 X86::XOR32ri, X86::MOV32rm,
Dale Johannesen140be2d2008-08-19 18:47:28 +00007508 X86::LCMPXCHG32, X86::MOV32rr,
7509 X86::NOT32r, X86::EAX,
7510 X86::GR32RegisterClass);
Andrew Lenharth507a58a2008-06-14 05:48:15 +00007511 case X86::ATOMNAND32:
7512 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
Dale Johannesen140be2d2008-08-19 18:47:28 +00007513 X86::AND32ri, X86::MOV32rm,
7514 X86::LCMPXCHG32, X86::MOV32rr,
7515 X86::NOT32r, X86::EAX,
7516 X86::GR32RegisterClass, true);
Mon P Wang63307c32008-05-05 19:05:59 +00007517 case X86::ATOMMIN32:
7518 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr);
7519 case X86::ATOMMAX32:
7520 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG32rr);
7521 case X86::ATOMUMIN32:
7522 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB32rr);
7523 case X86::ATOMUMAX32:
7524 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA32rr);
Dale Johannesen140be2d2008-08-19 18:47:28 +00007525
7526 case X86::ATOMAND16:
7527 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
7528 X86::AND16ri, X86::MOV16rm,
7529 X86::LCMPXCHG16, X86::MOV16rr,
7530 X86::NOT16r, X86::AX,
7531 X86::GR16RegisterClass);
7532 case X86::ATOMOR16:
Scott Michelfdc40a02009-02-17 22:15:04 +00007533 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr,
Dale Johannesen140be2d2008-08-19 18:47:28 +00007534 X86::OR16ri, X86::MOV16rm,
7535 X86::LCMPXCHG16, X86::MOV16rr,
7536 X86::NOT16r, X86::AX,
7537 X86::GR16RegisterClass);
7538 case X86::ATOMXOR16:
7539 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR16rr,
7540 X86::XOR16ri, X86::MOV16rm,
7541 X86::LCMPXCHG16, X86::MOV16rr,
7542 X86::NOT16r, X86::AX,
7543 X86::GR16RegisterClass);
7544 case X86::ATOMNAND16:
7545 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
7546 X86::AND16ri, X86::MOV16rm,
7547 X86::LCMPXCHG16, X86::MOV16rr,
7548 X86::NOT16r, X86::AX,
7549 X86::GR16RegisterClass, true);
7550 case X86::ATOMMIN16:
7551 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL16rr);
7552 case X86::ATOMMAX16:
7553 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG16rr);
7554 case X86::ATOMUMIN16:
7555 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB16rr);
7556 case X86::ATOMUMAX16:
7557 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA16rr);
7558
7559 case X86::ATOMAND8:
7560 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
7561 X86::AND8ri, X86::MOV8rm,
7562 X86::LCMPXCHG8, X86::MOV8rr,
7563 X86::NOT8r, X86::AL,
7564 X86::GR8RegisterClass);
7565 case X86::ATOMOR8:
Scott Michelfdc40a02009-02-17 22:15:04 +00007566 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr,
Dale Johannesen140be2d2008-08-19 18:47:28 +00007567 X86::OR8ri, X86::MOV8rm,
7568 X86::LCMPXCHG8, X86::MOV8rr,
7569 X86::NOT8r, X86::AL,
7570 X86::GR8RegisterClass);
7571 case X86::ATOMXOR8:
7572 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR8rr,
7573 X86::XOR8ri, X86::MOV8rm,
7574 X86::LCMPXCHG8, X86::MOV8rr,
7575 X86::NOT8r, X86::AL,
7576 X86::GR8RegisterClass);
7577 case X86::ATOMNAND8:
7578 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
7579 X86::AND8ri, X86::MOV8rm,
7580 X86::LCMPXCHG8, X86::MOV8rr,
7581 X86::NOT8r, X86::AL,
7582 X86::GR8RegisterClass, true);
7583 // FIXME: There are no CMOV8 instructions; MIN/MAX need some other way.
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007584 // This group is for 64-bit hosts.
Dale Johannesena99e3842008-08-20 00:48:50 +00007585 case X86::ATOMAND64:
7586 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
Scott Michelfdc40a02009-02-17 22:15:04 +00007587 X86::AND64ri32, X86::MOV64rm,
Dale Johannesena99e3842008-08-20 00:48:50 +00007588 X86::LCMPXCHG64, X86::MOV64rr,
7589 X86::NOT64r, X86::RAX,
7590 X86::GR64RegisterClass);
7591 case X86::ATOMOR64:
Scott Michelfdc40a02009-02-17 22:15:04 +00007592 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr,
7593 X86::OR64ri32, X86::MOV64rm,
Dale Johannesena99e3842008-08-20 00:48:50 +00007594 X86::LCMPXCHG64, X86::MOV64rr,
7595 X86::NOT64r, X86::RAX,
7596 X86::GR64RegisterClass);
7597 case X86::ATOMXOR64:
7598 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr,
Scott Michelfdc40a02009-02-17 22:15:04 +00007599 X86::XOR64ri32, X86::MOV64rm,
Dale Johannesena99e3842008-08-20 00:48:50 +00007600 X86::LCMPXCHG64, X86::MOV64rr,
7601 X86::NOT64r, X86::RAX,
7602 X86::GR64RegisterClass);
7603 case X86::ATOMNAND64:
7604 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
7605 X86::AND64ri32, X86::MOV64rm,
7606 X86::LCMPXCHG64, X86::MOV64rr,
7607 X86::NOT64r, X86::RAX,
7608 X86::GR64RegisterClass, true);
7609 case X86::ATOMMIN64:
7610 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr);
7611 case X86::ATOMMAX64:
7612 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG64rr);
7613 case X86::ATOMUMIN64:
7614 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB64rr);
7615 case X86::ATOMUMAX64:
7616 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA64rr);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007617
7618 // This group does 64-bit operations on a 32-bit host.
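  // The opcode pairs below act on the two 32-bit halves of the value, e.g.
  // ATOMADD6432 adds the low halves with ADD32rr and the high halves with
  // ADC32rr so the carry propagates between them.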
7619 case X86::ATOMAND6432:
Scott Michelfdc40a02009-02-17 22:15:04 +00007620 return EmitAtomicBit6432WithCustomInserter(MI, BB,
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007621 X86::AND32rr, X86::AND32rr,
7622 X86::AND32ri, X86::AND32ri,
7623 false);
7624 case X86::ATOMOR6432:
Scott Michelfdc40a02009-02-17 22:15:04 +00007625 return EmitAtomicBit6432WithCustomInserter(MI, BB,
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007626 X86::OR32rr, X86::OR32rr,
7627 X86::OR32ri, X86::OR32ri,
7628 false);
7629 case X86::ATOMXOR6432:
Scott Michelfdc40a02009-02-17 22:15:04 +00007630 return EmitAtomicBit6432WithCustomInserter(MI, BB,
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007631 X86::XOR32rr, X86::XOR32rr,
7632 X86::XOR32ri, X86::XOR32ri,
7633 false);
7634 case X86::ATOMNAND6432:
Scott Michelfdc40a02009-02-17 22:15:04 +00007635 return EmitAtomicBit6432WithCustomInserter(MI, BB,
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007636 X86::AND32rr, X86::AND32rr,
7637 X86::AND32ri, X86::AND32ri,
7638 true);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007639 case X86::ATOMADD6432:
Scott Michelfdc40a02009-02-17 22:15:04 +00007640 return EmitAtomicBit6432WithCustomInserter(MI, BB,
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007641 X86::ADD32rr, X86::ADC32rr,
7642 X86::ADD32ri, X86::ADC32ri,
7643 false);
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007644 case X86::ATOMSUB6432:
Scott Michelfdc40a02009-02-17 22:15:04 +00007645 return EmitAtomicBit6432WithCustomInserter(MI, BB,
Dale Johannesen48c1bc22008-10-02 18:53:47 +00007646 X86::SUB32rr, X86::SBB32rr,
7647 X86::SUB32ri, X86::SBB32ri,
7648 false);
Dale Johannesen880ae362008-10-03 22:25:52 +00007649 case X86::ATOMSWAP6432:
Scott Michelfdc40a02009-02-17 22:15:04 +00007650 return EmitAtomicBit6432WithCustomInserter(MI, BB,
Dale Johannesen880ae362008-10-03 22:25:52 +00007651 X86::MOV32rr, X86::MOV32rr,
7652 X86::MOV32ri, X86::MOV32ri,
7653 false);
Evan Cheng60c07e12006-07-05 22:17:51 +00007654 }
7655}
7656
7657//===----------------------------------------------------------------------===//
7658// X86 Optimization Hooks
7659//===----------------------------------------------------------------------===//
7660
Dan Gohman475871a2008-07-27 21:46:04 +00007661void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
Dan Gohman977a76f2008-02-13 22:28:48 +00007662 const APInt &Mask,
Dan Gohmanfd29e0e2008-02-13 00:35:47 +00007663 APInt &KnownZero,
7664 APInt &KnownOne,
Dan Gohmanea859be2007-06-22 14:59:07 +00007665 const SelectionDAG &DAG,
Nate Begeman368e18d2006-02-16 21:11:51 +00007666 unsigned Depth) const {
Evan Cheng3a03ebb2005-12-21 23:05:39 +00007667 unsigned Opc = Op.getOpcode();
Evan Cheng865f0602006-04-05 06:11:20 +00007668 assert((Opc >= ISD::BUILTIN_OP_END ||
7669 Opc == ISD::INTRINSIC_WO_CHAIN ||
7670 Opc == ISD::INTRINSIC_W_CHAIN ||
7671 Opc == ISD::INTRINSIC_VOID) &&
7672 "Should use MaskedValueIsZero if you don't know whether Op"
7673 " is a target node!");
Evan Cheng3a03ebb2005-12-21 23:05:39 +00007674
Dan Gohmanf4f92f52008-02-13 23:07:24 +00007675 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); // Don't know anything.
Evan Cheng3a03ebb2005-12-21 23:05:39 +00007676 switch (Opc) {
Evan Cheng865f0602006-04-05 06:11:20 +00007677 default: break;
Evan Cheng97d0e0e2009-02-02 09:15:04 +00007678 case X86ISD::ADD:
7679 case X86ISD::SUB:
7680 case X86ISD::SMUL:
7681 case X86ISD::UMUL:
Dan Gohman076aee32009-03-04 19:44:21 +00007682 case X86ISD::INC:
7683 case X86ISD::DEC:
Evan Cheng97d0e0e2009-02-02 09:15:04 +00007684 // These nodes' second result is a boolean.
7685 if (Op.getResNo() == 0)
7686 break;
7687 // Fallthrough
Anton Korobeynikov12c49af2006-11-21 00:01:06 +00007688 case X86ISD::SETCC:
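    // These produce a 0/1 result, so every bit above the low bit is known zero.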
Dan Gohmanfd29e0e2008-02-13 00:35:47 +00007689 KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(),
7690 Mask.getBitWidth() - 1);
Nate Begeman368e18d2006-02-16 21:11:51 +00007691 break;
Evan Cheng3a03ebb2005-12-21 23:05:39 +00007692 }
Evan Cheng3a03ebb2005-12-21 23:05:39 +00007693}
Chris Lattner259e97c2006-01-31 19:43:35 +00007694
Evan Cheng206ee9d2006-07-07 08:33:52 +00007695/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
Evan Chengad4196b2008-05-12 19:56:52 +00007696/// node is a GlobalAddress + offset.
7697bool X86TargetLowering::isGAPlusOffset(SDNode *N,
7698 GlobalValue* &GA, int64_t &Offset) const{
7699 if (N->getOpcode() == X86ISD::Wrapper) {
7700 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
Evan Cheng206ee9d2006-07-07 08:33:52 +00007701 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
Dan Gohman6520e202008-10-18 02:06:02 +00007702 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
Evan Cheng206ee9d2006-07-07 08:33:52 +00007703 return true;
7704 }
Evan Cheng206ee9d2006-07-07 08:33:52 +00007705 }
Evan Chengad4196b2008-05-12 19:56:52 +00007706 return TargetLowering::isGAPlusOffset(N, GA, Offset);
Evan Cheng206ee9d2006-07-07 08:33:52 +00007707}
7708
Evan Chengad4196b2008-05-12 19:56:52 +00007709static bool isBaseAlignmentOfN(unsigned N, SDNode *Base,
7710 const TargetLowering &TLI) {
Evan Cheng206ee9d2006-07-07 08:33:52 +00007711 GlobalValue *GV;
Nick Lewycky916a9f02008-02-02 08:29:58 +00007712 int64_t Offset = 0;
Evan Chengad4196b2008-05-12 19:56:52 +00007713 if (TLI.isGAPlusOffset(Base, GV, Offset))
Evan Cheng7e2ff772008-05-08 00:57:18 +00007714 return (GV->getAlignment() >= N && (Offset % N) == 0);
Chris Lattnerba96fbc2008-01-26 20:07:42 +00007715 // DAG combine handles the stack object case.
Evan Cheng206ee9d2006-07-07 08:33:52 +00007716 return false;
7717}
7718
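/// EltsFromConsecutiveLoads - Check whether the elements of the shuffle are,
/// apart from undefs, a chain of consecutive, non-overlapping element loads.
/// On success LDBase is the load feeding element 0 and LastLoadedElt is the
/// highest element index that is actually loaded.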
Nate Begeman9008ca62009-04-27 18:41:29 +00007719static bool EltsFromConsecutiveLoads(ShuffleVectorSDNode *N, unsigned NumElems,
Eli Friedman7a5e5552009-06-07 06:52:44 +00007720 MVT EVT, LoadSDNode *&LDBase,
7721 unsigned &LastLoadedElt,
Evan Chengad4196b2008-05-12 19:56:52 +00007722 SelectionDAG &DAG, MachineFrameInfo *MFI,
7723 const TargetLowering &TLI) {
Eli Friedman7a5e5552009-06-07 06:52:44 +00007724 LDBase = NULL;
Anton Korobeynikovb51b6cf2009-06-09 23:00:39 +00007725 LastLoadedElt = -1U;
Evan Cheng7e2ff772008-05-08 00:57:18 +00007726 for (unsigned i = 0; i < NumElems; ++i) {
Nate Begeman9008ca62009-04-27 18:41:29 +00007727 if (N->getMaskElt(i) < 0) {
Eli Friedman7a5e5552009-06-07 06:52:44 +00007728 if (!LDBase)
Evan Cheng7e2ff772008-05-08 00:57:18 +00007729 return false;
7730 continue;
7731 }
7732
Dan Gohman475871a2008-07-27 21:46:04 +00007733 SDValue Elt = DAG.getShuffleScalarElt(N, i);
Gabor Greifba36cb52008-08-28 21:40:38 +00007734 if (!Elt.getNode() ||
7735 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
Evan Cheng7e2ff772008-05-08 00:57:18 +00007736 return false;
Eli Friedman7a5e5552009-06-07 06:52:44 +00007737 if (!LDBase) {
7738 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
Evan Cheng50d9e722008-05-10 06:46:49 +00007739 return false;
Eli Friedman7a5e5552009-06-07 06:52:44 +00007740 LDBase = cast<LoadSDNode>(Elt.getNode());
7741 LastLoadedElt = i;
Evan Cheng7e2ff772008-05-08 00:57:18 +00007742 continue;
7743 }
7744 if (Elt.getOpcode() == ISD::UNDEF)
7745 continue;
7746
Nate Begemanabc01992009-06-05 21:37:30 +00007747 LoadSDNode *LD = cast<LoadSDNode>(Elt);
Nate Begemanabc01992009-06-05 21:37:30 +00007748 if (!TLI.isConsecutiveLoad(LD, LDBase, EVT.getSizeInBits()/8, i, MFI))
Evan Cheng7e2ff772008-05-08 00:57:18 +00007749 return false;
Eli Friedman7a5e5552009-06-07 06:52:44 +00007750 LastLoadedElt = i;
Evan Cheng7e2ff772008-05-08 00:57:18 +00007751 }
7752 return true;
7753}
Evan Cheng206ee9d2006-07-07 08:33:52 +00007754
7755/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
7756/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
7757/// if the load addresses are consecutive, non-overlapping, and in the right
Mon P Wang1e955802009-04-03 02:43:30 +00007758 /// order. If only the low two of the four elements are loaded (the rest
7759 /// undef), it instead forms an X86ISD::VZEXT_LOAD: a 64-bit load that
7760 /// zeroes the upper half of the vector.
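/// For example, a v4f32 shuffle that only references (load a) and (load a+4)
/// becomes roughly (bit_convert (v2i64 (X86ISD::VZEXT_LOAD a))).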
Dan Gohman475871a2008-07-27 21:46:04 +00007761static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
Nate Begeman9008ca62009-04-27 18:41:29 +00007762 const TargetLowering &TLI) {
Dale Johannesene4d209d2009-02-03 20:21:25 +00007763 DebugLoc dl = N->getDebugLoc();
Duncan Sands83ec4b62008-06-06 12:08:01 +00007764 MVT VT = N->getValueType(0);
7765 MVT EVT = VT.getVectorElementType();
Nate Begeman9008ca62009-04-27 18:41:29 +00007766 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
7767 unsigned NumElems = VT.getVectorNumElements();
Mon P Wang1e955802009-04-03 02:43:30 +00007768
Eli Friedman7a5e5552009-06-07 06:52:44 +00007769 if (VT.getSizeInBits() != 128)
7770 return SDValue();
7771
Mon P Wang1e955802009-04-03 02:43:30 +00007772 // Try to combine a vector_shuffle into a 128-bit load.
7773 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
Eli Friedman7a5e5552009-06-07 06:52:44 +00007774 LoadSDNode *LD = NULL;
7775 unsigned LastLoadedElt;
7776 if (!EltsFromConsecutiveLoads(SVN, NumElems, EVT, LD, LastLoadedElt, DAG,
7777 MFI, TLI))
Dan Gohman475871a2008-07-27 21:46:04 +00007778 return SDValue();
Evan Cheng206ee9d2006-07-07 08:33:52 +00007779
Eli Friedman7a5e5552009-06-07 06:52:44 +00007780 if (LastLoadedElt == NumElems - 1) {
7781 if (isBaseAlignmentOfN(16, LD->getBasePtr().getNode(), TLI))
7782 return DAG.getLoad(VT, dl, LD->getChain(), LD->getBasePtr(),
7783 LD->getSrcValue(), LD->getSrcValueOffset(),
7784 LD->isVolatile());
Dale Johannesene4d209d2009-02-03 20:21:25 +00007785 return DAG.getLoad(VT, dl, LD->getChain(), LD->getBasePtr(),
Scott Michelfdc40a02009-02-17 22:15:04 +00007786 LD->getSrcValue(), LD->getSrcValueOffset(),
Eli Friedman7a5e5552009-06-07 06:52:44 +00007787 LD->isVolatile(), LD->getAlignment());
7788 } else if (NumElems == 4 && LastLoadedElt == 1) {
7789 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
Nate Begemanabc01992009-06-05 21:37:30 +00007790 SDValue Ops[] = { LD->getChain(), LD->getBasePtr() };
7791 SDValue ResNode = DAG.getNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 2);
Nate Begemanabc01992009-06-05 21:37:30 +00007792 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, ResNode);
7793 }
7794 return SDValue();
Scott Michelfdc40a02009-02-17 22:15:04 +00007795}
Evan Chengd880b972008-05-09 21:53:03 +00007796
Chris Lattner83e6c992006-10-04 06:57:07 +00007797/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
Dan Gohman475871a2008-07-27 21:46:04 +00007798static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
Chris Lattner47b4ce82009-03-11 05:48:52 +00007799 const X86Subtarget *Subtarget) {
7800 DebugLoc DL = N->getDebugLoc();
Dan Gohman475871a2008-07-27 21:46:04 +00007801 SDValue Cond = N->getOperand(0);
Chris Lattner47b4ce82009-03-11 05:48:52 +00007802 // Get the LHS/RHS of the select.
7803 SDValue LHS = N->getOperand(1);
7804 SDValue RHS = N->getOperand(2);
7805
Chris Lattner83e6c992006-10-04 06:57:07 +00007806 // If we have SSE[12] support, try to form min/max nodes.
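  // For example, (select (setolt X, Y), X, Y) becomes (X86ISD::FMIN X, Y),
  // and the mirrored patterns below become X86ISD::FMAX.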
7807 if (Subtarget->hasSSE2() &&
Chris Lattner47b4ce82009-03-11 05:48:52 +00007808 (LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64) &&
7809 Cond.getOpcode() == ISD::SETCC) {
7810 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
Anton Korobeynikov12c49af2006-11-21 00:01:06 +00007811
Chris Lattner47b4ce82009-03-11 05:48:52 +00007812 unsigned Opcode = 0;
7813 if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
7814 switch (CC) {
7815 default: break;
7816 case ISD::SETOLE: // (X <= Y) ? X : Y -> min
7817 case ISD::SETULE:
7818 case ISD::SETLE:
7819 if (!UnsafeFPMath) break;
7820 // FALL THROUGH.
7821 case ISD::SETOLT: // (X olt/lt Y) ? X : Y -> min
7822 case ISD::SETLT:
7823 Opcode = X86ISD::FMIN;
7824 break;
Anton Korobeynikov12c49af2006-11-21 00:01:06 +00007825
Chris Lattner47b4ce82009-03-11 05:48:52 +00007826 case ISD::SETOGT: // (X > Y) ? X : Y -> max
7827 case ISD::SETUGT:
7828 case ISD::SETGT:
7829 if (!UnsafeFPMath) break;
7830 // FALL THROUGH.
7831 case ISD::SETUGE: // (X uge/ge Y) ? X : Y -> max
7832 case ISD::SETGE:
7833 Opcode = X86ISD::FMAX;
7834 break;
Chris Lattner83e6c992006-10-04 06:57:07 +00007835 }
Chris Lattner47b4ce82009-03-11 05:48:52 +00007836 } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
7837 switch (CC) {
7838 default: break;
7839 case ISD::SETOGT: // (X > Y) ? Y : X -> min
7840 case ISD::SETUGT:
7841 case ISD::SETGT:
7842 if (!UnsafeFPMath) break;
7843 // FALL THROUGH.
7844 case ISD::SETUGE: // (X uge/ge Y) ? Y : X -> min
7845 case ISD::SETGE:
7846 Opcode = X86ISD::FMIN;
7847 break;
Anton Korobeynikov12c49af2006-11-21 00:01:06 +00007848
Chris Lattner47b4ce82009-03-11 05:48:52 +00007849 case ISD::SETOLE: // (X <= Y) ? Y : X -> max
7850 case ISD::SETULE:
7851 case ISD::SETLE:
7852 if (!UnsafeFPMath) break;
7853 // FALL THROUGH.
7854 case ISD::SETOLT: // (X olt/lt Y) ? Y : X -> max
7855 case ISD::SETLT:
7856 Opcode = X86ISD::FMAX;
7857 break;
7858 }
Chris Lattner83e6c992006-10-04 06:57:07 +00007859 }
Anton Korobeynikov12c49af2006-11-21 00:01:06 +00007860
Chris Lattner47b4ce82009-03-11 05:48:52 +00007861 if (Opcode)
7862 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
Chris Lattner83e6c992006-10-04 06:57:07 +00007863 }
Chris Lattner47b4ce82009-03-11 05:48:52 +00007864
Chris Lattnerd1980a52009-03-12 06:52:53 +00007865 // If this is a select between two integer constants, try to do some
7866 // optimizations.
Chris Lattnercee56e72009-03-13 05:53:31 +00007867 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
7868 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
Chris Lattnerd1980a52009-03-12 06:52:53 +00007869 // Don't do this for crazy integer types.
7870 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
7871 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
Chris Lattnercee56e72009-03-13 05:53:31 +00007872 // so that TrueC (the true value) is larger than FalseC.
Chris Lattnerd1980a52009-03-12 06:52:53 +00007873 bool NeedsCondInvert = false;
7874
Chris Lattnercee56e72009-03-13 05:53:31 +00007875 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
Chris Lattnerd1980a52009-03-12 06:52:53 +00007876 // Efficiently invertible.
7877 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
7878 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
7879 isa<ConstantSDNode>(Cond.getOperand(1))))) {
7880 NeedsCondInvert = true;
Chris Lattnercee56e72009-03-13 05:53:31 +00007881 std::swap(TrueC, FalseC);
Chris Lattnerd1980a52009-03-12 06:52:53 +00007882 }
7883
7884 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
Chris Lattnercee56e72009-03-13 05:53:31 +00007885 if (FalseC->getAPIntValue() == 0 &&
7886 TrueC->getAPIntValue().isPowerOf2()) {
Chris Lattnerd1980a52009-03-12 06:52:53 +00007887 if (NeedsCondInvert) // Invert the condition if needed.
7888 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
7889 DAG.getConstant(1, Cond.getValueType()));
7890
7891 // Zero extend the condition if needed.
7892 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
7893
Chris Lattnercee56e72009-03-13 05:53:31 +00007894 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
Chris Lattnerd1980a52009-03-12 06:52:53 +00007895 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
7896 DAG.getConstant(ShAmt, MVT::i8));
7897 }
Chris Lattner97a29a52009-03-13 05:22:11 +00007898
7899 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.
Chris Lattnercee56e72009-03-13 05:53:31 +00007900 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
Chris Lattner97a29a52009-03-13 05:22:11 +00007901 if (NeedsCondInvert) // Invert the condition if needed.
7902 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
7903 DAG.getConstant(1, Cond.getValueType()));
7904
7905 // Zero extend the condition if needed.
Chris Lattnercee56e72009-03-13 05:53:31 +00007906 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
7907 FalseC->getValueType(0), Cond);
Chris Lattner97a29a52009-03-13 05:22:11 +00007908 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
Chris Lattnercee56e72009-03-13 05:53:31 +00007909 SDValue(FalseC, 0));
Chris Lattner97a29a52009-03-13 05:22:11 +00007910 }
Chris Lattnercee56e72009-03-13 05:53:31 +00007911
7912 // Optimize cases that will turn into an LEA instruction. This requires
7913 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
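      // For example, (select Cond, 37, 32) becomes 32 + (zext Cond)*5, which
      // can then be selected as a single LEA, e.g. leal 32(%rcx,%rcx,4), %eax.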
7914 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
7915 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
7916 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
7917
7918 bool isFastMultiplier = false;
7919 if (Diff < 10) {
7920 switch ((unsigned char)Diff) {
7921 default: break;
7922 case 1: // result = add base, cond
7923 case 2: // result = lea base( , cond*2)
7924 case 3: // result = lea base(cond, cond*2)
7925 case 4: // result = lea base( , cond*4)
7926 case 5: // result = lea base(cond, cond*4)
7927 case 8: // result = lea base( , cond*8)
7928 case 9: // result = lea base(cond, cond*8)
7929 isFastMultiplier = true;
7930 break;
7931 }
7932 }
7933
7934 if (isFastMultiplier) {
7935 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
7936 if (NeedsCondInvert) // Invert the condition if needed.
7937 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
7938 DAG.getConstant(1, Cond.getValueType()));
7939
7940 // Zero extend the condition if needed.
7941 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
7942 Cond);
7943 // Scale the condition by the difference.
7944 if (Diff != 1)
7945 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
7946 DAG.getConstant(Diff, Cond.getValueType()));
7947
7948 // Add the base if non-zero.
7949 if (FalseC->getAPIntValue() != 0)
7950 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
7951 SDValue(FalseC, 0));
7952 return Cond;
7953 }
7954 }
Chris Lattnerd1980a52009-03-12 06:52:53 +00007955 }
7956 }
7957
Dan Gohman475871a2008-07-27 21:46:04 +00007958 return SDValue();
Chris Lattner83e6c992006-10-04 06:57:07 +00007959}
7960
Chris Lattnerd1980a52009-03-12 06:52:53 +00007961 /// PerformCMOVCombine - Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL] nodes.
7962static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
7963 TargetLowering::DAGCombinerInfo &DCI) {
7964 DebugLoc DL = N->getDebugLoc();
7965
7966 // If the flag operand isn't dead, don't touch this CMOV.
7967 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
7968 return SDValue();
7969
7970 // If this is a select between two integer constants, try to do some
7971 // optimizations. Note that the operands are ordered the opposite of SELECT
7972 // operands.
7973 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
7974 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
7975 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
7976 // larger than FalseC (the false value).
7977 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
7978
7979 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
7980 CC = X86::GetOppositeBranchCondition(CC);
7981 std::swap(TrueC, FalseC);
7982 }
7983
7984 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
Chris Lattnercee56e72009-03-13 05:53:31 +00007985 // This is efficient for any integer data type (including i8/i16) and
7986 // shift amount.
Chris Lattnerd1980a52009-03-12 06:52:53 +00007987 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
7988 SDValue Cond = N->getOperand(3);
7989 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
7990 DAG.getConstant(CC, MVT::i8), Cond);
7991
7992 // Zero extend the condition if needed.
7993 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
7994
7995 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
7996 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
7997 DAG.getConstant(ShAmt, MVT::i8));
7998 if (N->getNumValues() == 2) // Dead flag value?
7999 return DCI.CombineTo(N, Cond, SDValue());
8000 return Cond;
8001 }
Chris Lattnercee56e72009-03-13 05:53:31 +00008002
8003 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
8004 // for any integer data type, including i8/i16.
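      // For example, (Cond ? 5 : 4) becomes 4 + (zext (setcc Cond)).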
Chris Lattner97a29a52009-03-13 05:22:11 +00008005 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
8006 SDValue Cond = N->getOperand(3);
8007 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
8008 DAG.getConstant(CC, MVT::i8), Cond);
8009
8010 // Zero extend the condition if needed.
Chris Lattnercee56e72009-03-13 05:53:31 +00008011 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
8012 FalseC->getValueType(0), Cond);
Chris Lattner97a29a52009-03-13 05:22:11 +00008013 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
8014 SDValue(FalseC, 0));
Chris Lattnercee56e72009-03-13 05:53:31 +00008015
Chris Lattner97a29a52009-03-13 05:22:11 +00008016 if (N->getNumValues() == 2) // Dead flag value?
8017 return DCI.CombineTo(N, Cond, SDValue());
8018 return Cond;
8019 }
Chris Lattnercee56e72009-03-13 05:53:31 +00008020
8021 // Optimize cases that will turn into an LEA instruction. This requires
8022 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
8023 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
8024 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
8025 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
8026
8027 bool isFastMultiplier = false;
8028 if (Diff < 10) {
8029 switch ((unsigned char)Diff) {
8030 default: break;
8031 case 1: // result = add base, cond
8032 case 2: // result = lea base( , cond*2)
8033 case 3: // result = lea base(cond, cond*2)
8034 case 4: // result = lea base( , cond*4)
8035 case 5: // result = lea base(cond, cond*4)
8036 case 8: // result = lea base( , cond*8)
8037 case 9: // result = lea base(cond, cond*8)
8038 isFastMultiplier = true;
8039 break;
8040 }
8041 }
8042
8043 if (isFastMultiplier) {
8044 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
8045 SDValue Cond = N->getOperand(3);
8046 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
8047 DAG.getConstant(CC, MVT::i8), Cond);
8048 // Zero extend the condition if needed.
8049 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
8050 Cond);
8051 // Scale the condition by the difference.
8052 if (Diff != 1)
8053 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
8054 DAG.getConstant(Diff, Cond.getValueType()));
8055
8056 // Add the base if non-zero.
8057 if (FalseC->getAPIntValue() != 0)
8058 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
8059 SDValue(FalseC, 0));
8060 if (N->getNumValues() == 2) // Dead flag value?
8061 return DCI.CombineTo(N, Cond, SDValue());
8062 return Cond;
8063 }
8064 }
Chris Lattnerd1980a52009-03-12 06:52:53 +00008065 }
8066 }
8067 return SDValue();
8068}
8069
8070
Evan Cheng0b0cd912009-03-28 05:57:29 +00008071 /// PerformMulCombine - Optimize a single multiply by a constant into two
8072 /// multiplies in order to implement it with two cheaper instructions, e.g.
8073 /// LEA + SHL or LEA + LEA.
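/// For example, a multiply by 45 is rewritten roughly as
///   (X86ISD::MUL_IMM (X86ISD::MUL_IMM x, 9), 5)
/// so that each factor can be selected as an LEA.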
8074static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
8075 TargetLowering::DAGCombinerInfo &DCI) {
8076 if (DAG.getMachineFunction().
8077 getFunction()->hasFnAttr(Attribute::OptimizeForSize))
8078 return SDValue();
8079
8080 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
8081 return SDValue();
8082
8083 MVT VT = N->getValueType(0);
8084 if (VT != MVT::i64)
8085 return SDValue();
8086
8087 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
8088 if (!C)
8089 return SDValue();
8090 uint64_t MulAmt = C->getZExtValue();
8091 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
8092 return SDValue();
8093
8094 uint64_t MulAmt1 = 0;
8095 uint64_t MulAmt2 = 0;
8096 if ((MulAmt % 9) == 0) {
8097 MulAmt1 = 9;
8098 MulAmt2 = MulAmt / 9;
8099 } else if ((MulAmt % 5) == 0) {
8100 MulAmt1 = 5;
8101 MulAmt2 = MulAmt / 5;
8102 } else if ((MulAmt % 3) == 0) {
8103 MulAmt1 = 3;
8104 MulAmt2 = MulAmt / 3;
8105 }
8106 if (MulAmt2 &&
8107 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
8108 DebugLoc DL = N->getDebugLoc();
8109
8110 if (isPowerOf2_64(MulAmt2) &&
8111 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
8112 // If the second multiplier is pow2, issue it first. We want the multiply by
8113 // 3, 5, or 9 to be folded into the addressing mode unless the lone use
8114 // is an add.
8115 std::swap(MulAmt1, MulAmt2);
8116
8117 SDValue NewMul;
8118 if (isPowerOf2_64(MulAmt1))
8119 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
8120 DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
8121 else
Evan Cheng73f24c92009-03-30 21:36:47 +00008122 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
Evan Cheng0b0cd912009-03-28 05:57:29 +00008123 DAG.getConstant(MulAmt1, VT));
8124
8125 if (isPowerOf2_64(MulAmt2))
8126 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
8127 DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
8128 else
Evan Cheng73f24c92009-03-30 21:36:47 +00008129 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
Evan Cheng0b0cd912009-03-28 05:57:29 +00008130 DAG.getConstant(MulAmt2, VT));
8131
8132 // Do not add new nodes to DAG combiner worklist.
8133 DCI.CombineTo(N, NewMul, false);
8134 }
8135 return SDValue();
8136}
8137
8138
Nate Begeman740ab032009-01-26 00:52:55 +00008139/// PerformShiftCombine - Transforms vector shift nodes to use vector shifts
8140/// when possible.
8141static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
8142 const X86Subtarget *Subtarget) {
8143 // On X86 with SSE2 support, we can transform this to a vector shift if
8144 // all elements are shifted by the same amount. We can't do this in legalize
8145 // because a constant vector is typically transformed into a constant pool
8146 // load, so we have no knowledge of the shift amount.
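  // For example, (shl v4i32:x, <2, 2, 2, 2>) becomes the intrinsic node for
  // Intrinsic::x86_sse2_pslli_d with a scalar shift amount of 2.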
Nate Begemanc2fd67f2009-01-26 03:15:31 +00008147 if (!Subtarget->hasSSE2())
8148 return SDValue();
Scott Michelfdc40a02009-02-17 22:15:04 +00008149
Nate Begeman740ab032009-01-26 00:52:55 +00008150 MVT VT = N->getValueType(0);
Nate Begemanc2fd67f2009-01-26 03:15:31 +00008151 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16)
8152 return SDValue();
Scott Michelfdc40a02009-02-17 22:15:04 +00008153
Mon P Wang3becd092009-01-28 08:12:05 +00008154 SDValue ShAmtOp = N->getOperand(1);
8155 MVT EltVT = VT.getVectorElementType();
Chris Lattner47b4ce82009-03-11 05:48:52 +00008156 DebugLoc DL = N->getDebugLoc();
Mon P Wang3becd092009-01-28 08:12:05 +00008157 SDValue BaseShAmt;
8158 if (ShAmtOp.getOpcode() == ISD::BUILD_VECTOR) {
8159 unsigned NumElts = VT.getVectorNumElements();
8160 unsigned i = 0;
8161 for (; i != NumElts; ++i) {
8162 SDValue Arg = ShAmtOp.getOperand(i);
8163 if (Arg.getOpcode() == ISD::UNDEF) continue;
8164 BaseShAmt = Arg;
8165 break;
8166 }
8167 for (; i != NumElts; ++i) {
8168 SDValue Arg = ShAmtOp.getOperand(i);
8169 if (Arg.getOpcode() == ISD::UNDEF) continue;
8170 if (Arg != BaseShAmt) {
8171 return SDValue();
8172 }
8173 }
8174 } else if (ShAmtOp.getOpcode() == ISD::VECTOR_SHUFFLE &&
Nate Begeman9008ca62009-04-27 18:41:29 +00008175 cast<ShuffleVectorSDNode>(ShAmtOp)->isSplat()) {
8176 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, ShAmtOp,
8177 DAG.getIntPtrConstant(0));
Mon P Wang3becd092009-01-28 08:12:05 +00008178 } else
Nate Begemanc2fd67f2009-01-26 03:15:31 +00008179 return SDValue();
Nate Begeman740ab032009-01-26 00:52:55 +00008180
Nate Begemanc2fd67f2009-01-26 03:15:31 +00008181 if (EltVT.bitsGT(MVT::i32))
Chris Lattner47b4ce82009-03-11 05:48:52 +00008182 BaseShAmt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, BaseShAmt);
Nate Begemanc2fd67f2009-01-26 03:15:31 +00008183 else if (EltVT.bitsLT(MVT::i32))
Chris Lattner47b4ce82009-03-11 05:48:52 +00008184 BaseShAmt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, BaseShAmt);
Nate Begeman740ab032009-01-26 00:52:55 +00008185
Nate Begemanc2fd67f2009-01-26 03:15:31 +00008186 // The shift amount is identical so we can do a vector shift.
8187 SDValue ValOp = N->getOperand(0);
8188 switch (N->getOpcode()) {
8189 default:
8190 assert(0 && "Unknown shift opcode!");
8191 break;
8192 case ISD::SHL:
8193 if (VT == MVT::v2i64)
Chris Lattner47b4ce82009-03-11 05:48:52 +00008194 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
Nate Begeman740ab032009-01-26 00:52:55 +00008195 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
8196 ValOp, BaseShAmt);
Nate Begemanc2fd67f2009-01-26 03:15:31 +00008197 if (VT == MVT::v4i32)
Chris Lattner47b4ce82009-03-11 05:48:52 +00008198 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
Nate Begeman740ab032009-01-26 00:52:55 +00008199 DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32),
8200 ValOp, BaseShAmt);
Nate Begemanc2fd67f2009-01-26 03:15:31 +00008201 if (VT == MVT::v8i16)
Chris Lattner47b4ce82009-03-11 05:48:52 +00008202 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
Nate Begeman740ab032009-01-26 00:52:55 +00008203 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32),
8204 ValOp, BaseShAmt);
Nate Begemanc2fd67f2009-01-26 03:15:31 +00008205 break;
8206 case ISD::SRA:
8207 if (VT == MVT::v4i32)
Chris Lattner47b4ce82009-03-11 05:48:52 +00008208 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
Nate Begeman740ab032009-01-26 00:52:55 +00008209 DAG.getConstant(Intrinsic::x86_sse2_psrai_d, MVT::i32),
8210 ValOp, BaseShAmt);
Nate Begemanc2fd67f2009-01-26 03:15:31 +00008211 if (VT == MVT::v8i16)
Chris Lattner47b4ce82009-03-11 05:48:52 +00008212 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
Nate Begeman740ab032009-01-26 00:52:55 +00008213 DAG.getConstant(Intrinsic::x86_sse2_psrai_w, MVT::i32),
8214 ValOp, BaseShAmt);
Nate Begemanc2fd67f2009-01-26 03:15:31 +00008215 break;
8216 case ISD::SRL:
8217 if (VT == MVT::v2i64)
Chris Lattner47b4ce82009-03-11 05:48:52 +00008218 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
Nate Begeman740ab032009-01-26 00:52:55 +00008219 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
8220 ValOp, BaseShAmt);
Nate Begemanc2fd67f2009-01-26 03:15:31 +00008221 if (VT == MVT::v4i32)
Chris Lattner47b4ce82009-03-11 05:48:52 +00008222 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
Nate Begeman740ab032009-01-26 00:52:55 +00008223 DAG.getConstant(Intrinsic::x86_sse2_psrli_d, MVT::i32),
8224 ValOp, BaseShAmt);
Nate Begemanc2fd67f2009-01-26 03:15:31 +00008225 if (VT == MVT::v8i16)
Chris Lattner47b4ce82009-03-11 05:48:52 +00008226 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
Nate Begeman740ab032009-01-26 00:52:55 +00008227 DAG.getConstant(Intrinsic::x86_sse2_psrli_w, MVT::i32),
8228 ValOp, BaseShAmt);
Nate Begemanc2fd67f2009-01-26 03:15:31 +00008229 break;
Nate Begeman740ab032009-01-26 00:52:55 +00008230 }
8231 return SDValue();
8232}
8233
Chris Lattner149a4e52008-02-22 02:09:43 +00008234/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
Dan Gohman475871a2008-07-27 21:46:04 +00008235static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
Evan Cheng536e6672009-03-12 05:59:15 +00008236 const X86Subtarget *Subtarget) {
Chris Lattner149a4e52008-02-22 02:09:43 +00008237 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
8238 // the FP state in cases where an emms may be missing.
Dale Johannesen079f2a62008-02-25 19:20:14 +00008239 // A preferable solution to the general problem is to figure out the right
8240 // places to insert EMMS. This qualifies as a quick hack.
Evan Cheng536e6672009-03-12 05:59:15 +00008241
8242 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
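  // For example, on a 32-bit target with SSE2, (store (load i64 p), q) is
  // rewritten as an f64 load/store pair; 64-bit MMX vector values without
  // usable f64 are split into two i32 load/store pairs instead.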
Evan Cheng7e2ff772008-05-08 00:57:18 +00008243 StoreSDNode *St = cast<StoreSDNode>(N);
Evan Cheng536e6672009-03-12 05:59:15 +00008244 MVT VT = St->getValue().getValueType();
8245 if (VT.getSizeInBits() != 64)
8246 return SDValue();
8247
Devang Patel578efa92009-06-05 21:57:13 +00008248 const Function *F = DAG.getMachineFunction().getFunction();
8249 bool NoImplicitFloatOps = F->hasFnAttr(Attribute::NoImplicitFloat);
8250 bool F64IsLegal = !UseSoftFloat && !NoImplicitFloatOps
8251 && Subtarget->hasSSE2();
Evan Cheng536e6672009-03-12 05:59:15 +00008252 if ((VT.isVector() ||
8253 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
Dale Johannesen079f2a62008-02-25 19:20:14 +00008254 isa<LoadSDNode>(St->getValue()) &&
8255 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
8256 St->getChain().hasOneUse() && !St->isVolatile()) {
Gabor Greifba36cb52008-08-28 21:40:38 +00008257 SDNode* LdVal = St->getValue().getNode();
Dale Johannesen079f2a62008-02-25 19:20:14 +00008258 LoadSDNode *Ld = 0;
8259 int TokenFactorIndex = -1;
Dan Gohman475871a2008-07-27 21:46:04 +00008260 SmallVector<SDValue, 8> Ops;
Gabor Greifba36cb52008-08-28 21:40:38 +00008261 SDNode* ChainVal = St->getChain().getNode();
Dale Johannesen079f2a62008-02-25 19:20:14 +00008262 // Must be a store of a load. We currently handle two cases: the load
8263 // is a direct child, or it's under an intervening TokenFactor. It is
8264 // possible to dig deeper under nested TokenFactors.
Dale Johannesen14e2ea92008-02-25 22:29:22 +00008265 if (ChainVal == LdVal)
Dale Johannesen079f2a62008-02-25 19:20:14 +00008266 Ld = cast<LoadSDNode>(St->getChain());
8267 else if (St->getValue().hasOneUse() &&
8268 ChainVal->getOpcode() == ISD::TokenFactor) {
8269 for (unsigned i=0, e = ChainVal->getNumOperands(); i != e; ++i) {
Gabor Greifba36cb52008-08-28 21:40:38 +00008270 if (ChainVal->getOperand(i).getNode() == LdVal) {
Dale Johannesen079f2a62008-02-25 19:20:14 +00008271 TokenFactorIndex = i;
8272 Ld = cast<LoadSDNode>(St->getValue());
8273 } else
8274 Ops.push_back(ChainVal->getOperand(i));
8275 }
8276 }
Dale Johannesen079f2a62008-02-25 19:20:14 +00008277
Evan Cheng536e6672009-03-12 05:59:15 +00008278 if (!Ld || !ISD::isNormalLoad(Ld))
8279 return SDValue();
Dale Johannesen079f2a62008-02-25 19:20:14 +00008280
Evan Cheng536e6672009-03-12 05:59:15 +00008281 // If this is not the MMX case, i.e. we are just turning i64 load/store
8282 // into f64 load/store, avoid the transformation if there are multiple
8283 // uses of the loaded value.
8284 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
8285 return SDValue();
Dale Johannesen079f2a62008-02-25 19:20:14 +00008286
Evan Cheng536e6672009-03-12 05:59:15 +00008287 DebugLoc LdDL = Ld->getDebugLoc();
8288 DebugLoc StDL = N->getDebugLoc();
8289 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
8290 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
8291 // pair instead.
8292 if (Subtarget->is64Bit() || F64IsLegal) {
8293 MVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
8294 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(),
8295 Ld->getBasePtr(), Ld->getSrcValue(),
8296 Ld->getSrcValueOffset(), Ld->isVolatile(),
8297 Ld->getAlignment());
8298 SDValue NewChain = NewLd.getValue(1);
Dale Johannesen079f2a62008-02-25 19:20:14 +00008299 if (TokenFactorIndex != -1) {
Evan Cheng536e6672009-03-12 05:59:15 +00008300 Ops.push_back(NewChain);
8301 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0],
Dale Johannesen079f2a62008-02-25 19:20:14 +00008302 Ops.size());
8303 }
Evan Cheng536e6672009-03-12 05:59:15 +00008304 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
Chris Lattner149a4e52008-02-22 02:09:43 +00008305 St->getSrcValue(), St->getSrcValueOffset(),
8306 St->isVolatile(), St->getAlignment());
8307 }
Evan Cheng536e6672009-03-12 05:59:15 +00008308
8309 // Otherwise, lower to two pairs of 32-bit loads / stores.
8310 SDValue LoAddr = Ld->getBasePtr();
8311 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
8312 DAG.getConstant(4, MVT::i32));
8313
8314 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
8315 Ld->getSrcValue(), Ld->getSrcValueOffset(),
8316 Ld->isVolatile(), Ld->getAlignment());
8317 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
8318 Ld->getSrcValue(), Ld->getSrcValueOffset()+4,
8319 Ld->isVolatile(),
8320 MinAlign(Ld->getAlignment(), 4));
8321
8322 SDValue NewChain = LoLd.getValue(1);
8323 if (TokenFactorIndex != -1) {
8324 Ops.push_back(LoLd);
8325 Ops.push_back(HiLd);
8326 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0],
8327 Ops.size());
8328 }
8329
8330 LoAddr = St->getBasePtr();
8331 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
8332 DAG.getConstant(4, MVT::i32));
8333
8334 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
8335 St->getSrcValue(), St->getSrcValueOffset(),
8336 St->isVolatile(), St->getAlignment());
8337 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
8338 St->getSrcValue(),
8339 St->getSrcValueOffset() + 4,
8340 St->isVolatile(),
8341 MinAlign(St->getAlignment(), 4));
8342 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
Chris Lattner149a4e52008-02-22 02:09:43 +00008343 }
Dan Gohman475871a2008-07-27 21:46:04 +00008344 return SDValue();
Chris Lattner149a4e52008-02-22 02:09:43 +00008345}
8346
Chris Lattner6cf73262008-01-25 06:14:17 +00008347/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
8348/// X86ISD::FXOR nodes.
Dan Gohman475871a2008-07-27 21:46:04 +00008349static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
Chris Lattner6cf73262008-01-25 06:14:17 +00008350 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
8351 // F[X]OR(0.0, x) -> x
8352 // F[X]OR(x, 0.0) -> x
Chris Lattneraf723b92008-01-25 05:46:26 +00008353 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
8354 if (C->getValueAPF().isPosZero())
8355 return N->getOperand(1);
8356 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
8357 if (C->getValueAPF().isPosZero())
8358 return N->getOperand(0);
Dan Gohman475871a2008-07-27 21:46:04 +00008359 return SDValue();
Chris Lattneraf723b92008-01-25 05:46:26 +00008360}
8361
8362/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
Dan Gohman475871a2008-07-27 21:46:04 +00008363static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
Chris Lattneraf723b92008-01-25 05:46:26 +00008364 // FAND(0.0, x) -> 0.0
8365 // FAND(x, 0.0) -> 0.0
8366 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
8367 if (C->getValueAPF().isPosZero())
8368 return N->getOperand(0);
8369 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
8370 if (C->getValueAPF().isPosZero())
8371 return N->getOperand(1);
Dan Gohman475871a2008-07-27 21:46:04 +00008372 return SDValue();
Chris Lattneraf723b92008-01-25 05:46:26 +00008373}
8374
Dan Gohmane5af2d32009-01-29 01:59:02 +00008375static SDValue PerformBTCombine(SDNode *N,
8376 SelectionDAG &DAG,
8377 TargetLowering::DAGCombinerInfo &DCI) {
8378 // BT ignores high bits in the bit index operand.
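  // For example, for an i32 index only the low 5 bits are demanded, so a
  // mask such as (and idx, 31) feeding the bit index can be simplified away.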
8379 SDValue Op1 = N->getOperand(1);
8380 if (Op1.hasOneUse()) {
8381 unsigned BitWidth = Op1.getValueSizeInBits();
8382 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
8383 APInt KnownZero, KnownOne;
8384 TargetLowering::TargetLoweringOpt TLO(DAG);
8385 TargetLowering &TLI = DAG.getTargetLoweringInfo();
8386 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
8387 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
8388 DCI.CommitTargetLoweringOpt(TLO);
8389 }
8390 return SDValue();
8391}
Chris Lattner83e6c992006-10-04 06:57:07 +00008392
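/// PerformVZEXT_MOVLCombine - Fold (VZEXT_MOVL (bit_convert (VZEXT_LOAD x)))
/// into just the bit_convert. When the element sizes match, the VZEXT_LOAD
/// has already zeroed everything above its low element, so the extra
/// VZEXT_MOVL is redundant.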
Eli Friedman7a5e5552009-06-07 06:52:44 +00008393static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
8394 SDValue Op = N->getOperand(0);
8395 if (Op.getOpcode() == ISD::BIT_CONVERT)
8396 Op = Op.getOperand(0);
8397 MVT VT = N->getValueType(0), OpVT = Op.getValueType();
8398 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
8399 VT.getVectorElementType().getSizeInBits() ==
8400 OpVT.getVectorElementType().getSizeInBits()) {
8401 return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, Op);
8402 }
8403 return SDValue();
8404}
8405
Dan Gohman475871a2008-07-27 21:46:04 +00008406SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
Evan Cheng9dd93b32008-11-05 06:03:38 +00008407 DAGCombinerInfo &DCI) const {
Evan Cheng206ee9d2006-07-07 08:33:52 +00008408 SelectionDAG &DAG = DCI.DAG;
8409 switch (N->getOpcode()) {
8410 default: break;
Evan Chengad4196b2008-05-12 19:56:52 +00008411 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, *this);
Chris Lattneraf723b92008-01-25 05:46:26 +00008412 case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget);
Chris Lattnerd1980a52009-03-12 06:52:53 +00008413 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI);
Evan Cheng0b0cd912009-03-28 05:57:29 +00008414 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
Nate Begeman740ab032009-01-26 00:52:55 +00008415 case ISD::SHL:
8416 case ISD::SRA:
8417 case ISD::SRL: return PerformShiftCombine(N, DAG, Subtarget);
Evan Cheng7e2ff772008-05-08 00:57:18 +00008418 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
Chris Lattner6cf73262008-01-25 06:14:17 +00008419 case X86ISD::FXOR:
Chris Lattneraf723b92008-01-25 05:46:26 +00008420 case X86ISD::FOR: return PerformFORCombine(N, DAG);
8421 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
Dan Gohmane5af2d32009-01-29 01:59:02 +00008422 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
Eli Friedman7a5e5552009-06-07 06:52:44 +00008423 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
Evan Cheng206ee9d2006-07-07 08:33:52 +00008424 }
8425
Dan Gohman475871a2008-07-27 21:46:04 +00008426 return SDValue();
Evan Cheng206ee9d2006-07-07 08:33:52 +00008427}
8428
Evan Cheng60c07e12006-07-05 22:17:51 +00008429//===----------------------------------------------------------------------===//
8430// X86 Inline Assembly Support
8431//===----------------------------------------------------------------------===//
8432
Chris Lattnerf4dff842006-07-11 02:54:03 +00008433/// getConstraintType - Given a constraint letter, return the type of
8434/// constraint it is for this target.
8435X86TargetLowering::ConstraintType
Chris Lattner4234f572007-03-25 02:14:49 +00008436X86TargetLowering::getConstraintType(const std::string &Constraint) const {
8437 if (Constraint.size() == 1) {
8438 switch (Constraint[0]) {
8439 case 'A':
Dale Johannesen330169f2008-11-13 21:52:36 +00008440 return C_Register;
Chris Lattnerfce84ac2008-03-11 19:06:29 +00008441 case 'f':
Chris Lattner4234f572007-03-25 02:14:49 +00008442 case 'r':
8443 case 'R':
8444 case 'l':
8445 case 'q':
8446 case 'Q':
8447 case 'x':
Dale Johannesen2ffbcac2008-04-01 00:57:48 +00008448 case 'y':
Chris Lattner4234f572007-03-25 02:14:49 +00008449 case 'Y':
8450 return C_RegisterClass;
Dale Johannesen78e3e522009-02-12 20:58:09 +00008451 case 'e':
8452 case 'Z':
8453 return C_Other;
Chris Lattner4234f572007-03-25 02:14:49 +00008454 default:
8455 break;
8456 }
Chris Lattnerf4dff842006-07-11 02:54:03 +00008457 }
Chris Lattner4234f572007-03-25 02:14:49 +00008458 return TargetLowering::getConstraintType(Constraint);
Chris Lattnerf4dff842006-07-11 02:54:03 +00008459}
8460
Dale Johannesenba2a0b92008-01-29 02:21:21 +00008461/// LowerXConstraint - try to replace an X constraint, which matches anything,
8462/// with another that has more specific requirements based on the type of the
8463/// corresponding operand.
Chris Lattner5e764232008-04-26 23:02:14 +00008464const char *X86TargetLowering::
Duncan Sands83ec4b62008-06-06 12:08:01 +00008465LowerXConstraint(MVT ConstraintVT) const {
Chris Lattner5e764232008-04-26 23:02:14 +00008466 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
8467 // 'f' like normal targets.
Duncan Sands83ec4b62008-06-06 12:08:01 +00008468 if (ConstraintVT.isFloatingPoint()) {
Dale Johannesenba2a0b92008-01-29 02:21:21 +00008469 if (Subtarget->hasSSE2())
Chris Lattner5e764232008-04-26 23:02:14 +00008470 return "Y";
8471 if (Subtarget->hasSSE1())
8472 return "x";
8473 }
Scott Michelfdc40a02009-02-17 22:15:04 +00008474
Chris Lattner5e764232008-04-26 23:02:14 +00008475 return TargetLowering::LowerXConstraint(ConstraintVT);
Dale Johannesenba2a0b92008-01-29 02:21:21 +00008476}
8477
Chris Lattner48884cd2007-08-25 00:47:38 +00008478/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
8479/// vector. If it is invalid, don't add anything to Ops.
Dan Gohman475871a2008-07-27 21:46:04 +00008480void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
Chris Lattner48884cd2007-08-25 00:47:38 +00008481 char Constraint,
Evan Chengda43bcf2008-09-24 00:05:32 +00008482 bool hasMemory,
Dan Gohman475871a2008-07-27 21:46:04 +00008483 std::vector<SDValue>&Ops,
Chris Lattner5e764232008-04-26 23:02:14 +00008484 SelectionDAG &DAG) const {
Dan Gohman475871a2008-07-27 21:46:04 +00008485 SDValue Result(0, 0);
Scott Michelfdc40a02009-02-17 22:15:04 +00008486
Chris Lattner22aaf1d2006-10-31 20:13:11 +00008487 switch (Constraint) {
8488 default: break;
Devang Patel84f7fd22007-03-17 00:13:28 +00008489 case 'I':
Chris Lattner188b9fe2007-03-25 01:57:35 +00008490 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
Dan Gohmanf5aeb1a2008-09-12 16:56:44 +00008491 if (C->getZExtValue() <= 31) {
8492 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
Chris Lattner48884cd2007-08-25 00:47:38 +00008493 break;
8494 }
Devang Patel84f7fd22007-03-17 00:13:28 +00008495 }
Chris Lattner48884cd2007-08-25 00:47:38 +00008496 return;
Evan Cheng364091e2008-09-22 23:57:37 +00008497 case 'J':
8498 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
Chris Lattner2e06dd22009-06-15 04:39:05 +00008499 if (C->getZExtValue() <= 63) {
Chris Lattnere4935152009-06-15 04:01:39 +00008500 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
8501 break;
8502 }
8503 }
8504 return;
8505 case 'K':
8506 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
Chris Lattner2e06dd22009-06-15 04:39:05 +00008507 if ((int8_t)C->getSExtValue() == C->getSExtValue()) {
Evan Cheng364091e2008-09-22 23:57:37 +00008508 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
8509 break;
8510 }
8511 }
8512 return;
Chris Lattner188b9fe2007-03-25 01:57:35 +00008513 case 'N':
8514 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
Dan Gohmanf5aeb1a2008-09-12 16:56:44 +00008515 if (C->getZExtValue() <= 255) {
8516 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
Chris Lattner48884cd2007-08-25 00:47:38 +00008517 break;
8518 }
Chris Lattner188b9fe2007-03-25 01:57:35 +00008519 }
Chris Lattner48884cd2007-08-25 00:47:38 +00008520 return;
Dale Johannesen78e3e522009-02-12 20:58:09 +00008521 case 'e': {
8522 // 32-bit signed value
8523 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
8524 const ConstantInt *CI = C->getConstantIntValue();
8525 if (CI->isValueValidForType(Type::Int32Ty, C->getSExtValue())) {
8526 // Widen to 64 bits here to get it sign extended.
8527 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
8528 break;
8529 }
8530 // FIXME gcc accepts some relocatable values here too, but only in certain
8531 // memory models; it's complicated.
8532 }
8533 return;
8534 }
8535 case 'Z': {
8536 // 32-bit unsigned value
8537 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
8538 const ConstantInt *CI = C->getConstantIntValue();
8539 if (CI->isValueValidForType(Type::Int32Ty, C->getZExtValue())) {
8540 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
8541 break;
8542 }
8543 }
8544 // FIXME gcc accepts some relocatable values here too, but only in certain
8545 // memory models; it's complicated.
8546 return;
8547 }
Chris Lattnerdc43a882007-05-03 16:52:29 +00008548 case 'i': {
Chris Lattner22aaf1d2006-10-31 20:13:11 +00008549 // Literal immediates are always ok.
Chris Lattner48884cd2007-08-25 00:47:38 +00008550 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
Dale Johannesen78e3e522009-02-12 20:58:09 +00008551 // Widen to 64 bits here to get it sign extended.
8552 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
Chris Lattner48884cd2007-08-25 00:47:38 +00008553 break;
8554 }
Anton Korobeynikov12c49af2006-11-21 00:01:06 +00008555
Chris Lattnerdc43a882007-05-03 16:52:29 +00008556 // If we are in non-pic codegen mode, we allow the address of a global (with
8557 // an optional displacement) to be used with 'i'.
Chris Lattner49921962009-05-08 18:23:14 +00008558 GlobalAddressSDNode *GA = 0;
Chris Lattnerdc43a882007-05-03 16:52:29 +00008559 int64_t Offset = 0;
Scott Michelfdc40a02009-02-17 22:15:04 +00008560
Chris Lattner49921962009-05-08 18:23:14 +00008561 // Match either (GA), (GA+C), (GA+C1+C2), etc.
8562 while (1) {
8563 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
8564 Offset += GA->getOffset();
8565 break;
8566 } else if (Op.getOpcode() == ISD::ADD) {
8567 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
8568 Offset += C->getZExtValue();
8569 Op = Op.getOperand(0);
8570 continue;
8571 }
8572 } else if (Op.getOpcode() == ISD::SUB) {
8573 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
8574 Offset += -C->getZExtValue();
8575 Op = Op.getOperand(0);
8576 continue;
8577 }
Chris Lattnerdc43a882007-05-03 16:52:29 +00008578 }
Chris Lattner49921962009-05-08 18:23:14 +00008579
8580 // Otherwise, this isn't something we can handle, reject it.
8581 return;
Chris Lattnerdc43a882007-05-03 16:52:29 +00008582 }
Scott Michelfdc40a02009-02-17 22:15:04 +00008583
Chris Lattner49921962009-05-08 18:23:14 +00008584 if (hasMemory)
8585 Op = LowerGlobalAddress(GA->getGlobal(), Op.getDebugLoc(), Offset, DAG);
8586 else
8587 Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
8588 Offset);
8589 Result = Op;
8590 break;
Chris Lattner22aaf1d2006-10-31 20:13:11 +00008591 }
Chris Lattnerdc43a882007-05-03 16:52:29 +00008592 }
Scott Michelfdc40a02009-02-17 22:15:04 +00008593
Gabor Greifba36cb52008-08-28 21:40:38 +00008594 if (Result.getNode()) {
Chris Lattner48884cd2007-08-25 00:47:38 +00008595 Ops.push_back(Result);
8596 return;
8597 }
Evan Chengda43bcf2008-09-24 00:05:32 +00008598 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, hasMemory,
8599 Ops, DAG);
Chris Lattner22aaf1d2006-10-31 20:13:11 +00008600}
8601
std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      else if (VT == MVT::i64)
        return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0);
      break;
    }
  }

  return std::vector<unsigned>();
}

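/// getRegForInlineAsmConstraint - Map an inline asm constraint and value type
/// to a (register, register class) pair.  Single-letter class constraints are
/// handled directly; named registers such as "{ax}" go through the generic
/// TargetLowering lookup and are then remapped below to a class that matches
/// the requested type.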
std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8)
        return std::make_pair(0U, X86::GR8RegisterClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR32RegisterClass);
      return std::make_pair(0U, X86::GR64RegisterClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP32RegisterClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP64RegisterClass);
      return std::make_pair(0U, X86::RFP80RegisterClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.getSimpleVT()) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // GCC calls "st(0)" just plain "st".
    if (StringsEqualNoCase("{st}", Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RFP80RegisterClass;
    }
    // 'A' means EAX + EDX.
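    // This is the register pair GCC uses for doubleword values that are
    // split across EDX:EAX.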
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = X86::GRADRegisterClass;
    }
    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}, but we don't want
  // it to turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
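  // The same remapping handles 64-bit requests below, e.g. "{ax}" with an
  // i64 operand becomes RAX in GR64.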
  if (Res.second == X86::GR16RegisterClass) {
    if (VT == MVT::i8) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR8RegisterClass;
      }
    } else if (VT == MVT::i32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR32RegisterClass;
      }
    } else if (VT == MVT::i64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR64RegisterClass;
      }
    }
  } else if (Res.second == X86::FR32RegisterClass ||
             Res.second == X86::FR64RegisterClass ||
             Res.second == X86::VR128RegisterClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class.  This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.
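    // For example, "{xmm0}" used with an f32 operand should end up in FR32,
    // not VR128.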
    if (VT == MVT::f32)
      Res.second = X86::FR32RegisterClass;
    else if (VT == MVT::f64)
      Res.second = X86::FR64RegisterClass;
    else if (X86::VR128RegisterClass->hasType(VT))
      Res.second = X86::VR128RegisterClass;
  }

  return Res;
}

//===----------------------------------------------------------------------===//
//                           X86 Widen vector type
//===----------------------------------------------------------------------===//

/// getWidenVectorType: given a vector type, returns the type to widen
/// to (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
/// If there is no vector type that we want to widen to, returns MVT::Other.
/// When and where to widen is target-dependent, based on the cost of
/// scalarizing vs. using the wider vector type.
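/// For example, v7i8 widens to v8i8 when v8i8 is legal; otherwise the search
/// below picks the first legal vector type with the same element type and a
/// larger element count.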
MVT X86TargetLowering::getWidenVectorType(MVT VT) const {
  assert(VT.isVector());
  if (isTypeLegal(VT))
    return VT;

  // TODO: In computeRegisterProperty, we can compute the list of legal vector
  //       type based on element type.  This would speed up our search (though
  //       it may not be worth it since the size of the list is relatively
  //       small).
  MVT EltVT = VT.getVectorElementType();
  unsigned NElts = VT.getVectorNumElements();

  // On X86, it makes sense to widen any vector with more than one element.
  if (NElts <= 1)
    return MVT::Other;

  for (unsigned nVT = MVT::FIRST_VECTOR_VALUETYPE;
       nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
    MVT SVT = (MVT::SimpleValueType)nVT;

    if (isTypeLegal(SVT) &&
        SVT.getVectorElementType() == EltVT &&
        SVT.getVectorNumElements() > NElts)
      return SVT;
  }
  return MVT::Other;
}