//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef X86ISELLOWERING_H
#define X86ISELLOWERING_H

#include "X86Subtarget.h"
#include "X86RegisterInfo.h"
#include "X86MachineFunctionInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"

namespace llvm {
  namespace X86ISD {
    // X86 Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// BSF - Bit scan forward.
      /// BSR - Bit scan reverse.
      BSF,
      BSR,

      /// SHLD, SHRD - Double shift instructions. These correspond to
      /// X86::SHLDxx and X86::SHRDxx instructions.
      SHLD,
      SHRD,

      /// FAND - Bitwise logical AND of floating point values. This corresponds
      /// to X86::ANDPS or X86::ANDPD.
      FAND,

      /// FOR - Bitwise logical OR of floating point values. This corresponds
      /// to X86::ORPS or X86::ORPD.
      FOR,

      /// FXOR - Bitwise logical XOR of floating point values. This corresponds
      /// to X86::XORPS or X86::XORPD.
      FXOR,

      /// FSRL - Bitwise logical right shift of floating point values. This
      /// corresponds to X86::PSRLDQ.
      FSRL,

      /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
      /// integer source in memory and FP reg result. This corresponds to the
      /// X86::FILD*m instructions. It has three inputs (token chain, address,
      /// and source type) and two outputs (FP value and token chain). FILD_FLAG
      /// also produces a flag.
      FILD,
      FILD_FLAG,

      /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
      /// integer destination in memory and a FP reg source. This corresponds
      /// to the X86::FIST*m instructions and the rounding mode change stuff. It
      /// has two inputs (token chain and address) and two outputs (int value
      /// and token chain).
      FP_TO_INT16_IN_MEM,
      FP_TO_INT32_IN_MEM,
      FP_TO_INT64_IN_MEM,

      /// FLD - This instruction implements an extending load to FP stack slots.
      /// This corresponds to the X86::FLD32m / X86::FLD64m instructions. It
      /// takes a chain operand, a ptr to load from, and a ValueType node
      /// indicating the type to load to.
      FLD,

      /// FST - This instruction implements a truncating store to FP stack
      /// slots. This corresponds to the X86::FST32m / X86::FST64m
      /// instructions. It takes a chain operand, a value to store, an address,
      /// and a ValueType to store it as.
      FST,

      /// CALL - These operations represent an abstract X86 call
      /// instruction, which includes a bunch of information. In particular the
      /// operands of these nodes are:
      ///
      ///     #0 - The incoming token chain
      ///     #1 - The callee
      ///     #2 - The number of arg bytes the caller pushes on the stack.
      ///     #3 - The number of arg bytes the callee pops off the stack.
      ///     #4 - The value to pass in AL/AX/EAX (optional)
      ///     #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      ///     #0 - The outgoing token chain
      ///     #1 - The first register result value (optional)
      ///     #2 - The second register result value (optional)
      ///
      CALL,
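
      // Illustrative reading of the operand list above (not part of the
      // original header): a call that pushes 12 bytes of arguments and whose
      // callee pops nothing would carry the incoming chain, the callee, an
      // arg-byte count of 12 for operand #2 and 0 for operand #3; operands
      // #4/#5 and result values #1/#2 are present only when the call
      // actually uses them.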

      /// RDTSC_DAG - This operation implements the lowering for
      /// readcyclecounter.
      RDTSC_DAG,

      /// X86 compare and logical compare instructions.
      CMP, COMI, UCOMI,

      /// X86 bit-test instructions.
      BT,

      /// X86 SetCC. Operand 0 is the condition code, and operand 1 is the flag
      /// operand produced by a CMP instruction.
      SETCC,

      // Same as SETCC except it's materialized with a sbb and the value is all
      // ones or all zeros.
      SETCC_CARRY,

      /// X86 conditional moves. Operand 0 and operand 1 are the two values
      /// to select from. Operand 2 is the condition code, and operand 3 is the
      /// flag operand produced by a CMP or TEST instruction. It also writes a
      /// flag result.
      CMOV,

      /// X86 conditional branches. Operand 0 is the chain operand, operand 1
      /// is the block to branch to if the condition is true, operand 2 is the
      /// condition code, and operand 3 is the flag operand produced by a CMP
      /// or TEST instruction.
      BRCOND,

      /// Return with a flag operand. Operand 0 is the chain operand, operand
      /// 1 is the number of bytes of stack to pop.
      RET_FLAG,

      /// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx.
      REP_STOS,

      /// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.
      REP_MOVS,

      /// GlobalBaseReg - On Darwin, this node represents the result of the popl
      /// at function entry, used for PIC code.
      GlobalBaseReg,

      /// Wrapper - A wrapper node for TargetConstantPool,
      /// TargetExternalSymbol, and TargetGlobalAddress.
      Wrapper,

      /// WrapperRIP - Special wrapper used under X86-64 PIC mode for
      /// RIP-relative displacements.
      WrapperRIP,

      /// MOVQ2DQ - Copies a 64-bit value from a vector to another vector.
      /// Can be used to move a vector value from an MMX register to an XMM
      /// register.
      MOVQ2DQ,

      /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRB.
      PEXTRB,

      /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRW.
      PEXTRW,

      /// INSERTPS - Insert any element of a 4 x float vector into any element
      /// of a destination 4 x float vector.
      INSERTPS,

      /// PINSRB - Insert the lower 8 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRB.
      PINSRB,

      /// PINSRW - Insert the lower 16 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRW.
      PINSRW, MMX_PINSRW,

      /// PSHUFB - Shuffle 16 8-bit values within a vector.
      PSHUFB,

      /// FMAX, FMIN - Floating point max and min.
      ///
      FMAX, FMIN,

      /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
      /// approximation. Note that these typically require refinement
      /// in order to obtain suitable precision.
      FRSQRT, FRCP,

      // TLSADDR - Thread Local Storage.
      TLSADDR,

      // TLSCALL - Thread Local Storage. A call to an OS-provided thunk at
      // the address from an earlier relocation.
      TLSCALL,

      // SegmentBaseAddress - The address segment:0
      SegmentBaseAddress,

      // EH_RETURN - Exception Handling helpers.
      EH_RETURN,

      /// TC_RETURN - Tail call return.
      ///   operand #0 chain
      ///   operand #1 callee (register or absolute)
      ///   operand #2 stack adjustment
      ///   operand #3 optional in flag
      TC_RETURN,

      // LCMPXCHG_DAG, LCMPXCHG8_DAG - Compare and swap.
      LCMPXCHG_DAG,
      LCMPXCHG8_DAG,

      // FNSTCW16m - Store FP control word into i16 memory.
      FNSTCW16m,

      // VZEXT_MOVL - Vector move low and zero extend.
      VZEXT_MOVL,

      // VZEXT_LOAD - Load, scalar_to_vector, and zero extend.
      VZEXT_LOAD,

      // VSHL, VSRL - Vector logical left / right shift.
      VSHL, VSRL,

      // CMPPD, CMPPS - Vector double/float comparison.
      CMPPD, CMPPS,

      // PCMP* - Vector integer comparisons.
      PCMPEQB, PCMPEQW, PCMPEQD, PCMPEQQ,
      PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ,

      // ADD, SUB, SMUL, UMUL, etc. - Arithmetic operations with FLAGS results.
      ADD, SUB, SMUL, UMUL,
      INC, DEC, OR, XOR, AND,

      // MUL_IMM - X86 specific multiply by immediate.
      MUL_IMM,

      // PTEST - Vector bitwise comparisons.
      PTEST,

      // TESTP - Vector packed fp sign bitwise comparisons.
      TESTP,

      // Several flavors of instructions with vector shuffle behaviors.
      PALIGN,
      PSHUFD,
      PSHUFHW,
      PSHUFLW,
      PSHUFHW_LD,
      PSHUFLW_LD,
      SHUFPD,
      SHUFPS,
      MOVDDUP,
      MOVSHDUP,
      MOVSLDUP,
      MOVSHDUP_LD,
      MOVSLDUP_LD,
      MOVLHPS,
      MOVLHPD,
      MOVHLPS,
      MOVHLPD,
      MOVLPS,
      MOVLPD,
      MOVSD,
      MOVSS,
      UNPCKLPS,
      UNPCKLPD,
      UNPCKHPS,
      UNPCKHPD,
      PUNPCKLBW,
      PUNPCKLWD,
      PUNPCKLDQ,
      PUNPCKLQDQ,
      PUNPCKHBW,
      PUNPCKHWD,
      PUNPCKHDQ,
      PUNPCKHQDQ,

      // VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
      // according to %al. An operator is needed so that this can be expanded
      // with control flow.
      VASTART_SAVE_XMM_REGS,

      // MINGW_ALLOCA - MingW's __alloca call to do stack probing.
      MINGW_ALLOCA,

      // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
      // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
      // Atomic 64-bit binary operations.
      ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
      ATOMSUB64_DAG,
      ATOMOR64_DAG,
      ATOMXOR64_DAG,
      ATOMAND64_DAG,
      ATOMNAND64_DAG,
      ATOMSWAP64_DAG,

      // Memory barriers.
      MEMBARRIER,
      MFENCE,
      SFENCE,
      LFENCE

      // WARNING: Do not add anything in the end unless you want the node to
      // have memop! In fact, starting from ATOMADD64_DAG all opcodes will be
      // thought of as target memory ops!
    };
  }

  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFD.
    bool isPSHUFDMask(ShuffleVectorSDNode *N);

    /// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFHW.
    bool isPSHUFHWMask(ShuffleVectorSDNode *N);

    /// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFLW.
    bool isPSHUFLWMask(ShuffleVectorSDNode *N);

    /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to SHUFP*.
    bool isSHUFPMask(ShuffleVectorSDNode *N);

    /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
    bool isMOVHLPSMask(ShuffleVectorSDNode *N);

    /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
    /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
    /// <2, 3, 2, 3>.
    bool isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N);

    /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for MOVLP{S|D}.
    bool isMOVLPMask(ShuffleVectorSDNode *N);

    /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for MOVHP{S|D},
    /// as well as MOVLHPS.
    bool isMOVLHPSMask(ShuffleVectorSDNode *N);

    /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to UNPCKL.
    bool isUNPCKLMask(ShuffleVectorSDNode *N, bool V2IsSplat = false);

    /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to UNPCKH.
    bool isUNPCKHMask(ShuffleVectorSDNode *N, bool V2IsSplat = false);

    /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
    /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
    /// <0, 0, 1, 1>.
    bool isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N);

    /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
    /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
    /// <2, 2, 3, 3>.
    bool isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N);

    /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSS,
    /// MOVSD, and MOVD, i.e. setting the lowest element.
    bool isMOVLMask(ShuffleVectorSDNode *N);

    /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
    bool isMOVSHDUPMask(ShuffleVectorSDNode *N);

    /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
    bool isMOVSLDUPMask(ShuffleVectorSDNode *N);

    /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVDDUP.
    bool isMOVDDUPMask(ShuffleVectorSDNode *N);

    /// isPALIGNRMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PALIGNR.
    bool isPALIGNRMask(ShuffleVectorSDNode *N);

    /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with the PSHUF* and SHUFP*
    /// instructions.
    unsigned getShuffleSHUFImmediate(SDNode *N);
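
    // Worked example for illustration (not part of the original header): for
    // the 4-element mask <2, 3, 0, 1>, each index is packed into two bits,
    // lowest destination element first, so the PSHUFD/SHUFPS-style immediate
    // is 2 | (3 << 2) | (0 << 4) | (1 << 6) = 0x4E. The lowering code is
    // expected to pair each is*Mask predicate above with the matching
    // getShuffle*Immediate helper when it emits the instruction.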

    /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
    unsigned getShufflePSHUFHWImmediate(SDNode *N);

    /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
    unsigned getShufflePSHUFLWImmediate(SDNode *N);

    /// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction.
    unsigned getShufflePALIGNRImmediate(SDNode *N);

    /// isZeroNode - Returns true if Elt is a constant zero or a floating point
    /// constant +0.0.
    bool isZeroNode(SDValue Elt);

    /// isOffsetSuitableForCodeModel - Returns true if the given offset can
    /// fit into the displacement field of the instruction.
    bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                      bool hasSymbolicDisplacement = true);
  }

  //===--------------------------------------------------------------------===//
  //  X86TargetLowering - X86 Implementation of the TargetLowering interface
  class X86TargetLowering : public TargetLowering {
  public:
    explicit X86TargetLowering(X86TargetMachine &TM);

    /// getPICBaseSymbol - Return the X86-32 PIC base.
    MCSymbol *getPICBaseSymbol(const MachineFunction *MF, MCContext &Ctx) const;

    virtual unsigned getJumpTableEncoding() const;

    virtual const MCExpr *
    LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                              const MachineBasicBlock *MBB, unsigned uid,
                              MCContext &Ctx) const;

    /// getPICJumpTableRelocBase - Returns the relocation base for the given
    /// PIC jumptable.
    virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                             SelectionDAG &DAG) const;
    virtual const MCExpr *
    getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                 unsigned JTI, MCContext &Ctx) const;

    /// getStackPtrReg - Return the stack pointer register we are using: either
    /// ESP or RSP.
    unsigned getStackPtrReg() const { return X86StackPtr; }

    /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area. For X86, aggregates
    /// that contain SSE vectors are placed at 16-byte boundaries while the
    /// rest are at 4-byte boundaries.
    virtual unsigned getByValTypeAlignment(const Type *Ty) const;

    /// getOptimalMemOpType - Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, the destination can satisfy any
    /// alignment constraint. Similarly, if SrcAlign is zero, there is no need
    /// to check it against an alignment requirement, probably because the
    /// source does not need to be loaded. If 'NonScalarIntSafe' is true, it is
    /// safe to return a non-scalar-integer type, e.g. for an empty string
    /// source, a constant, or a value loaded from memory. 'MemcpyStrSrc'
    /// indicates whether the memcpy source is constant so it does not need to
    /// be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    virtual EVT
    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                        bool NonScalarIntSafe, bool MemcpyStrSrc,
                        MachineFunction &MF) const;
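
    // For illustration only (an assumption, not taken from this header): on a
    // subtarget with unaligned SSE support, an implementation might return a
    // 128-bit vector type such as MVT::v4f32 for a 16-byte memcpy so it is
    // lowered to a single vector load/store pair, falling back to scalar
    // integer types as the remaining size shrinks.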

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
      return true;
    }

    /// LowerOperation - Provide custom lowering hooks for some operations.
    ///
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG) const;

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    /// isTypeDesirableForOp - Return true if the target has native support for
    /// the specified value type and it is 'desirable' to use the type for the
    /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
    /// instruction encodings are longer and some i16 instructions are slow.
    virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const;

    /// IsDesirableToPromoteOp - Return true if it is profitable for the DAG
    /// combiner to promote the type of the specified node; if so, the desired
    /// promotion type is returned in PVT. e.g. on x86, i16 operations are
    /// often promoted to i32 since i16 instruction encodings are longer and
    /// some i16 instructions are slow.
    virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    /// getTargetNodeName - This method returns the name of a target specific
    /// DAG node.
    virtual const char *getTargetNodeName(unsigned Opcode) const;

    /// getSetCCResultType - Return the ISD::SETCC ValueType.
    virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;

    /// computeMaskedBitsForTargetNode - Determine which of the bits specified
    /// in Mask are known to be either zero or one and return them in the
    /// KnownZero/KnownOne bitsets.
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth = 0) const;

    virtual bool
    isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;

    virtual bool ExpandInlineAsm(CallInst *CI) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight value,
    /// where: -1 = invalid match, and 0 = so-so match to 3 = good match.
    /// The operand object must already have been set up with the operand type.
    virtual int getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    std::vector<unsigned>
      getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                        EVT VT) const;

    virtual const char *LowerXConstraint(EVT ConstraintVT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              char ConstraintLetter,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    /// getRegForInlineAsmConstraint - Given a physical register constraint
    /// (e.g. {edx}), return the register number and the register class for the
    /// register. This should only be used for C_Register constraints. On
    /// error, this returns a register number of 0.
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;

    /// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
    /// register EAX to i16 by referencing its sub-register AX.
    virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const;
    virtual bool isTruncateFree(EVT VT1, EVT VT2) const;

    /// isZExtFree - Return true if any actual instruction that defines a
    /// value of type Ty1 implicitly zero-extends the value to Ty2 in the
    /// result register. This does not necessarily include registers defined in
    /// unknown ways, such as incoming arguments, or copies from unknown
    /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
    /// does not necessarily apply to truncate instructions. e.g. on x86-64,
    /// all instructions that define 32-bit values implicitly zero-extend the
    /// result out to 64 bits.
    virtual bool isZExtFree(const Type *Ty1, const Type *Ty2) const;
    virtual bool isZExtFree(EVT VT1, EVT VT2) const;
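
    // Concrete illustration (not part of the original header): on x86-64 a
    // plain 'movl %edi, %eax' already clears the upper 32 bits of RAX, so the
    // x86 implementation is expected to report isZExtFree(i32, i64) as true
    // in 64-bit mode, letting the DAG combiner drop an explicit zext.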

    /// isNarrowingProfitable - Return true if it's profitable to narrow
    /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
    /// from i32 to i8 but not from i32 to i16.
    virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

    /// isShuffleMaskLegal - Targets can use this to indicate that they only
    /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
    /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
    /// values are assumed to be legal.
    virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
                                    EVT VT) const;

    /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can use
    /// this to indicate whether there is a suitable VECTOR_SHUFFLE that can be
    /// used to replace a VAND with a constant pool entry.
    virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                        EVT VT) const;

    /// ShouldShrinkFPConstant - If true, then instruction selection should
    /// seek to shrink the FP constant of the specified type to a smaller type
    /// in order to save space and / or reduce runtime.
    virtual bool ShouldShrinkFPConstant(EVT VT) const {
      // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
      // expensive than a straight movsd. On the other hand, it's important to
      // shrink long double fp constant since fldt is very slow.
      return !X86ScalarSSEf64 || VT == MVT::f80;
    }

    const X86Subtarget* getSubtarget() const {
      return Subtarget;
    }

    /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
    /// computed in an SSE register, not on the X87 floating point stack.
    bool isScalarFPTypeInSSEReg(EVT VT) const {
      return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
             (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is when SSE1
    }

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;

    /// getFunctionAlignment - Return the Log2 alignment of this function.
    virtual unsigned getFunctionAlignment(const Function *F) const;

    unsigned getRegPressureLimit(const TargetRegisterClass *RC,
                                 MachineFunction &MF) const;

    /// getStackCookieLocation - Return true if the target stores stack
    /// protector cookies at a fixed offset in some non-standard address
    /// space, and populates the address space and offset as
    /// appropriate.
    virtual bool getStackCookieLocation(unsigned &AddressSpace,
                                        unsigned &Offset) const;

  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
      findRepresentativeClass(EVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;
    const X86RegisterInfo *RegInfo;
    const TargetData *TD;

    /// X86StackPtr - X86 physical register used as stack ptr.
    unsigned X86StackPtr;

    /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
    /// floating point ops.
    /// When SSE is available, use it for f32 operations.
    /// When SSE2 is available, use it for f64 operations.
    bool X86ScalarSSEf32;
    bool X86ScalarSSEf64;

    /// LegalFPImmediates - A list of legal FP immediates.
    std::vector<APFloat> LegalFPImmediates;

    /// addLegalFPImmediate - Indicate that this x86 target can instruction
    /// select the specified FP immediate natively.
    void addLegalFPImmediate(const APFloat& Imm) {
      LegalFPImmediates.push_back(Imm);
    }

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerMemArgument(SDValue Chain,
                             CallingConv::ID CallConv,
                             const SmallVectorImpl<ISD::InputArg> &ArgInfo,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA, MachineFrameInfo *MFI,
                             unsigned i) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;

    // Call lowering helpers.

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;
    bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
    SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                    SDValue Chain, bool IsTailCall,
                                    bool Is64Bit, int FPDiff, DebugLoc dl) const;

    CCAssignFn *CCAssignFnForNode(CallingConv::ID CallConv) const;
    unsigned GetAlignedArgumentStackSize(unsigned StackSize,
                                         SelectionDAG &DAG) const;

    std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                               bool isSigned) const;

    SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
                                   SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
                               int64_t Offset, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
    SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
                      SelectionDAG &DAG) const;
    SDValue LowerBIT_CONVERT(SDValue op, SelectionDAG &DAG) const;
    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToBT(SDValue And, ISD::CondCode CC,
                      DebugLoc dl, SelectionDAG &DAG) const;
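    // (Descriptive note, not in the original: LowerToBT matches single-bit
    // tests such as '(x >> 5) & 1' onto the BT instruction, with the result
    // read back through the carry flag via the given condition code.)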
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSHL(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const;

    // Utility functions to help LowerVECTOR_SHUFFLE.
    SDValue LowerVECTOR_SHUFFLEv8i16(SDValue Op, SelectionDAG &DAG) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;
    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    virtual bool
      CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                     LLVMContext &Context) const;

    void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                 SelectionDAG &DAG, unsigned NewOp) const;

    /// Utility function to emit string processing SSE4.2 instructions
    /// that return in xmm0.
    /// This takes the instruction to expand, the associated machine basic
    /// block, the number of args, and whether or not the second arg is
    /// in memory.
    MachineBasicBlock *EmitPCMP(MachineInstr *BInstr, MachineBasicBlock *BB,
                                unsigned argNum, bool inMem) const;

    /// Utility function to emit atomic bitwise operations (and, or, xor).
    /// It takes the bitwise instruction to expand, the associated machine basic
    /// block, and the associated X86 opcodes for reg/reg and reg/imm.
    MachineBasicBlock *EmitAtomicBitwiseWithCustomInserter(
                                                    MachineInstr *BInstr,
                                                    MachineBasicBlock *BB,
                                                    unsigned regOpc,
                                                    unsigned immOpc,
                                                    unsigned loadOpc,
                                                    unsigned cxchgOpc,
                                                    unsigned notOpc,
                                                    unsigned EAXreg,
                                                    TargetRegisterClass *RC,
                                                    bool invSrc = false) const;
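
    // Illustrative expansion (an assumption about typical usage, not stated
    // in this header): a 32-bit atomic AND pseudo would be handed opcodes
    // along the lines of AND32rr/AND32ri for regOpc/immOpc, MOV32rm for the
    // load, LCMPXCHG32 for the compare-and-swap, NOT32r for notOpc, EAX for
    // EAXreg, and the GR32 register class, producing a load / op / cmpxchg
    // retry loop in the emitted machine basic blocks.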

    MachineBasicBlock *EmitAtomicBit6432WithCustomInserter(
                                                    MachineInstr *BInstr,
                                                    MachineBasicBlock *BB,
                                                    unsigned regOpcL,
                                                    unsigned regOpcH,
                                                    unsigned immOpcL,
                                                    unsigned immOpcH,
                                                    bool invSrc = false) const;

    /// Utility function to emit atomic min and max. It takes the min/max
    /// instruction to expand, the associated basic block, and the associated
    /// cmov opcode for moving the min or max value.
    MachineBasicBlock *EmitAtomicMinMaxWithCustomInserter(MachineInstr *BInstr,
                                                          MachineBasicBlock *BB,
                                                        unsigned cmovOpc) const;

    /// Utility function to emit the xmm reg save portion of va_start.
    MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
                                                   MachineInstr *BInstr,
                                                   MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
                                         MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredMingwAlloca(MachineInstr *MI,
                                              MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    /// Emit nodes that will be selected as "test Op0,Op0", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;

    /// Emit nodes that will be selected as "cmp Op0,Op1", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                    SelectionDAG &DAG) const;
  };

  namespace X86 {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
  }
}

#endif    // X86ISELLOWERING_H