blob: ccff3a5ea69da9deb3bc86cd0ff90cb381b4926c [file] [log] [blame]
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00001//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
Chris Lattner4ee451d2007-12-29 20:36:04 +00005// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00007//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the interfaces that X86 uses to lower LLVM code into a
11// selection DAG.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef X86ISELLOWERING_H
16#define X86ISELLOWERING_H
17
Evan Cheng559806f2006-01-27 08:10:46 +000018#include "X86Subtarget.h"
Anton Korobeynikov2365f512007-07-14 14:06:15 +000019#include "X86RegisterInfo.h"
Gordon Henriksen86737662008-01-05 16:56:59 +000020#include "X86MachineFunctionInfo.h"
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +000021#include "llvm/Target/TargetLowering.h"
Evan Chengddc419c2010-01-26 19:04:47 +000022#include "llvm/Target/TargetOptions.h"
Ted Kremenekb388eb82008-09-03 02:54:11 +000023#include "llvm/CodeGen/FastISel.h"
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +000024#include "llvm/CodeGen/SelectionDAG.h"
Rafael Espindola1b5dcc32007-08-31 15:06:30 +000025#include "llvm/CodeGen/CallingConvLower.h"
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +000026
27namespace llvm {
  namespace X86ISD {
    // X86 Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// BSF - Bit scan forward.
      /// BSR - Bit scan reverse.
      BSF,
      BSR,

      /// SHLD, SHRD - Double shift instructions. These correspond to
      /// X86::SHLDxx and X86::SHRDxx instructions.
      SHLD,
      SHRD,

      /// FAND - Bitwise logical AND of floating point values. This corresponds
      /// to X86::ANDPS or X86::ANDPD.
      FAND,

      /// FOR - Bitwise logical OR of floating point values. This corresponds
      /// to X86::ORPS or X86::ORPD.
      FOR,

      /// FXOR - Bitwise logical XOR of floating point values. This corresponds
      /// to X86::XORPS or X86::XORPD.
      FXOR,

      /// FSRL - Bitwise logical right shift of floating point values. This
      /// corresponds to X86::PSRLDQ.
      FSRL,

      /// CALL - These operations represent an abstract X86 call
      /// instruction, which includes a bunch of information. In particular the
      /// operands of these nodes are:
      ///
      /// #0 - The incoming token chain
      /// #1 - The callee
      /// #2 - The number of arg bytes the caller pushes on the stack.
      /// #3 - The number of arg bytes the callee pops off the stack.
      /// #4 - The value to pass in AL/AX/EAX (optional)
      /// #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      /// #0 - The outgoing token chain
      /// #1 - The first register result value (optional)
      /// #2 - The second register result value (optional)
      ///
      CALL,

      /// RDTSC_DAG - This operation implements the lowering for
      /// readcyclecounter
      RDTSC_DAG,

      /// X86 compare and logical compare instructions.
      CMP, COMI, UCOMI,

      /// X86 bit-test instructions.
      BT,

      /// X86 SetCC. Operand 0 is condition code, and operand 1 is the EFLAGS
      /// operand, usually produced by a CMP instruction.
      SETCC,

      // Same as SETCC except it's materialized with a sbb and the value is all
      // one's or all zero's.
      SETCC_CARRY,  // R = carry_bit ? ~0 : 0

      /// X86 FP SETCC, implemented with CMP{cc}SS/CMP{cc}SD.
      /// Operands are two FP values to compare; result is a mask of
      /// 0s or 1s.  Generally DTRT for C/C++ with NaNs.
      FSETCCss, FSETCCsd,

      /// X86 MOVMSK{pd|ps}, extracts sign bits of two or four FP values,
      /// result in an integer GPR.  Needs masking for scalar result.
      FGETSIGNx86,

      /// X86 conditional moves. Operand 0 and operand 1 are the two values
      /// to select from. Operand 2 is the condition code, and operand 3 is the
      /// flag operand produced by a CMP or TEST instruction. It also writes a
      /// flag result.
      CMOV,

      /// X86 conditional branches. Operand 0 is the chain operand, operand 1
      /// is the block to branch if condition is true, operand 2 is the
      /// condition code, and operand 3 is the flag operand produced by a CMP
      /// or TEST instruction.
      BRCOND,

      /// Return with a flag operand. Operand 0 is the chain operand, operand
      /// 1 is the number of bytes of stack to pop.
      RET_FLAG,

      /// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx.
      REP_STOS,

      /// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.
      REP_MOVS,

      /// GlobalBaseReg - On Darwin, this node represents the result of the popl
      /// at function entry, used for PIC code.
      GlobalBaseReg,

      /// Wrapper - A wrapper node for TargetConstantPool,
      /// TargetExternalSymbol, and TargetGlobalAddress.
      Wrapper,

      /// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP
      /// relative displacements.
      WrapperRIP,

      /// MOVQ2DQ - Copies a 64-bit value from an MMX vector to the low word
      /// of an XMM vector, with the high word zero filled.
      MOVQ2DQ,

      /// MOVDQ2Q - Copies a 64-bit value from the low word of an XMM vector
      /// to an MMX vector.  If you think this is too close to the previous
      /// mnemonic, so do I; blame Intel.
      MOVDQ2Q,

      /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRB.
      PEXTRB,

      /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRW.
      PEXTRW,

      /// INSERTPS - Insert any element of a 4 x float vector into any element
      /// of a destination 4 x float vector.
      INSERTPS,

      /// PINSRB - Insert the lower 8-bits of a 32-bit value to a vector,
      /// corresponds to X86::PINSRB.
      PINSRB,

      /// PINSRW - Insert the lower 16-bits of a 32-bit value to a vector,
      /// corresponds to X86::PINSRW.
      PINSRW, MMX_PINSRW,

      /// PSHUFB - Shuffle 16 8-bit values within a vector.
      PSHUFB,

      /// ANDNP - Bitwise Logical AND NOT of Packed FP values.
      ANDNP,

      /// PSIGN - Copy integer sign.
      PSIGN,

      /// BLEND family of opcodes
      BLENDV,

      /// HADD - Integer horizontal add.
      HADD,

      /// HSUB - Integer horizontal sub.
      HSUB,

      /// FHADD - Floating point horizontal add.
      FHADD,

      /// FHSUB - Floating point horizontal sub.
      FHSUB,

      /// FMAX, FMIN - Floating point max and min.
      ///
      FMAX, FMIN,

      /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
      /// approximation.  Note that these typically require refinement
      /// in order to obtain suitable precision.
      FRSQRT, FRCP,

      // TLSADDR - Thread Local Storage.
      TLSADDR,

      // TLSCALL - Thread Local Storage.  When calling to an OS provided
      // thunk at the address from an earlier relocation.
      TLSCALL,

      // EH_RETURN - Exception Handling helpers.
      EH_RETURN,

      /// TC_RETURN - Tail call return.
      ///   operand #0 chain
      ///   operand #1 callee (register or absolute)
      ///   operand #2 stack adjustment
      ///   operand #3 optional in flag
      TC_RETURN,

      // VZEXT_MOVL - Vector move low and zero extend.
      VZEXT_MOVL,

      // VSHL, VSRL - Vector logical left / right shift.
      VSHL, VSRL,

      // CMPPD, CMPPS - Vector double/float comparison.
      CMPPD, CMPPS,

      // PCMP* - Vector integer comparisons.
      PCMPEQB, PCMPEQW, PCMPEQD, PCMPEQQ,
      PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ,

      // ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
      ADD, SUB, ADC, SBB, SMUL,
      INC, DEC, OR, XOR, AND,

      ANDN, // ANDN - Bitwise AND NOT with FLAGS results.

      BLSI,   // BLSI - Extract lowest set isolated bit
      BLSMSK, // BLSMSK - Get mask up to lowest set bit
      BLSR,   // BLSR - Reset lowest set bit

      UMUL, // LOW, HI, FLAGS = umul LHS, RHS

      // MUL_IMM - X86 specific multiply by immediate.
      MUL_IMM,

      // PTEST - Vector bitwise comparisons
      PTEST,

      // TESTP - Vector packed fp sign bitwise comparisons
      TESTP,

      // Several flavors of instructions with vector shuffle behaviors.
      PALIGN,
      PSHUFD,
      PSHUFHW,
      PSHUFLW,
      PSHUFHW_LD,
      PSHUFLW_LD,
      SHUFPD,
      SHUFPS,
      MOVDDUP,
      MOVSHDUP,
      MOVSLDUP,
      MOVSHDUP_LD,
      MOVSLDUP_LD,
      MOVLHPS,
      MOVLHPD,
      MOVHLPS,
      MOVHLPD,
      MOVLPS,
      MOVLPD,
      MOVSD,
      MOVSS,
      UNPCKLPS,
      UNPCKLPD,
      UNPCKHPS,
      UNPCKHPD,
      PUNPCKLBW,
      PUNPCKLWD,
      PUNPCKLDQ,
      PUNPCKLQDQ,
      PUNPCKHBW,
      PUNPCKHWD,
      PUNPCKHDQ,
      PUNPCKHQDQ,
      VPERMILPS,
      VPERMILPSY,
      VPERMILPD,
      VPERMILPDY,
      VPERM2F128,
      VBROADCAST,

      // VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
      // according to %al. An operator is needed so that this can be expanded
      // with control flow.
      VASTART_SAVE_XMM_REGS,

      // WIN_ALLOCA - Windows's _chkstk call to do stack probing.
      WIN_ALLOCA,

      // SEG_ALLOCA - For allocating variable amounts of stack space when using
      // segmented stacks. Check if the current stacklet has enough space, and
      // falls back to heap allocation if not.
      SEG_ALLOCA,

      // Memory barrier
      MEMBARRIER,
      MFENCE,
      SFENCE,
      LFENCE,

      // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
      // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
      // Atomic 64-bit binary operations.
      ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
      ATOMSUB64_DAG,
      ATOMOR64_DAG,
      ATOMXOR64_DAG,
      ATOMAND64_DAG,
      ATOMNAND64_DAG,
      ATOMSWAP64_DAG,

      // LCMPXCHG_DAG, LCMPXCHG8_DAG, LCMPXCHG16_DAG - Compare and swap.
      LCMPXCHG_DAG,
      LCMPXCHG8_DAG,
      LCMPXCHG16_DAG,

      // VZEXT_LOAD - Load, scalar_to_vector, and zero extend.
      VZEXT_LOAD,

      // FNSTCW16m - Store FP control word into i16 memory.
      FNSTCW16m,

      /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
      /// integer destination in memory and a FP reg source.  This corresponds
      /// to the X86::FIST*m instructions and the rounding mode change stuff. It
      /// has two inputs (token chain and address) and two outputs (int value
      /// and token chain).
      FP_TO_INT16_IN_MEM,
      FP_TO_INT32_IN_MEM,
      FP_TO_INT64_IN_MEM,

      /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
      /// integer source in memory and FP reg result.  This corresponds to the
      /// X86::FILD*m instructions. It has three inputs (token chain, address,
      /// and source type) and two outputs (FP value and token chain). FILD_FLAG
      /// also produces a flag).
      FILD,
      FILD_FLAG,

      /// FLD - This instruction implements an extending load to FP stack slots.
      /// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain
      /// operand, ptr to load from, and a ValueType node indicating the type
      /// to load to.
      FLD,

      /// FST - This instruction implements a truncating store to FP stack
      /// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
      /// chain operand, value to store, address, and a ValueType to store it
      /// as.
      FST,

      /// VAARG_64 - This instruction grabs the address of the next argument
      /// from a va_list. (reads and modifies the va_list in memory)
      VAARG_64

      // WARNING: Do not add anything in the end unless you want the node to
      // have memop! In fact, starting from ATOMADD64_DAG all opcodes will be
      // thought as target memory ops!
    };
  }
374
  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFD.
    bool isPSHUFDMask(ShuffleVectorSDNode *N);

    /// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFHW.
    bool isPSHUFHWMask(ShuffleVectorSDNode *N);

    /// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFLW.
    bool isPSHUFLWMask(ShuffleVectorSDNode *N);

    /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to SHUFP*.
    bool isSHUFPMask(ShuffleVectorSDNode *N);

    /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
    bool isMOVHLPSMask(ShuffleVectorSDNode *N);

    /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
    /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
    /// <2, 3, 2, 3>
    bool isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N);

    /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for MOVLP{S|D}.
    bool isMOVLPMask(ShuffleVectorSDNode *N);

    /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for MOVLHPS.
    bool isMOVLHPSMask(ShuffleVectorSDNode *N);

    /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to UNPCKL.
    bool isUNPCKLMask(ShuffleVectorSDNode *N, bool HasAVX2,
                      bool V2IsSplat = false);

    /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to UNPCKH.
    bool isUNPCKHMask(ShuffleVectorSDNode *N, bool HasAVX2,
                      bool V2IsSplat = false);

    /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
    /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
    /// <0, 0, 1, 1>
    bool isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N);

    /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
    /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
    /// <2, 2, 3, 3>
    bool isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N);

    /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSS,
    /// MOVSD, and MOVD, i.e. setting the lowest element.
    bool isMOVLMask(ShuffleVectorSDNode *N);

    /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
    bool isMOVSHDUPMask(ShuffleVectorSDNode *N, const X86Subtarget *Subtarget);

    /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
    bool isMOVSLDUPMask(ShuffleVectorSDNode *N, const X86Subtarget *Subtarget);

    /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVDDUP.
    bool isMOVDDUPMask(ShuffleVectorSDNode *N);

    /// isVEXTRACTF128Index - Return true if the specified
    /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
    /// suitable for input to VEXTRACTF128.
    bool isVEXTRACTF128Index(SDNode *N);

    /// isVINSERTF128Index - Return true if the specified
    /// INSERT_SUBVECTOR operand specifies a subvector insert that is
    /// suitable for input to VINSERTF128.
    bool isVINSERTF128Index(SDNode *N);

    /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
    /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
    /// instructions.
    unsigned getShuffleSHUFImmediate(SDNode *N);

    /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with PSHUFHW instruction.
    unsigned getShufflePSHUFHWImmediate(SDNode *N);

    /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with PSHUFLW instruction.
    unsigned getShufflePSHUFLWImmediate(SDNode *N);

    /// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction.
    unsigned getShufflePALIGNRImmediate(SDNode *N);

    /// getExtractVEXTRACTF128Immediate - Return the appropriate
    /// immediate to extract the specified EXTRACT_SUBVECTOR index
    /// with VEXTRACTF128 instructions.
    unsigned getExtractVEXTRACTF128Immediate(SDNode *N);

    /// getInsertVINSERTF128Immediate - Return the appropriate
    /// immediate to insert at the specified INSERT_SUBVECTOR index
    /// with VINSERTF128 instructions.
    unsigned getInsertVINSERTF128Immediate(SDNode *N);

    /// isZeroNode - Returns true if Elt is a constant zero or a floating point
    /// constant +0.0.
    bool isZeroNode(SDValue Elt);

    /// isOffsetSuitableForCodeModel - Returns true if the given offset can
    /// fit into displacement field of the instruction.
    bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                      bool hasSymbolicDisplacement = true);


    /// isCalleePop - Determines whether the callee is required to pop its
    /// own arguments. Callee pop is necessary to support tail calls.
    bool isCalleePop(CallingConv::ID CallingConv,
                     bool is64Bit, bool IsVarArg, bool TailCallOpt);
  }
500
Chris Lattner91897772006-10-18 18:26:48 +0000501 //===--------------------------------------------------------------------===//
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +0000502 // X86TargetLowering - X86 Implementation of the TargetLowering interface
503 class X86TargetLowering : public TargetLowering {
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +0000504 public:
Dan Gohmanc9f5f3f2008-05-14 01:58:56 +0000505 explicit X86TargetLowering(X86TargetMachine &TM);
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +0000506
Chris Lattnerc64daab2010-01-26 05:02:42 +0000507 virtual unsigned getJumpTableEncoding() const;
Chris Lattner5e1df8d2010-01-25 23:38:14 +0000508
Owen Anderson95771af2011-02-25 21:41:48 +0000509 virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i8; }
510
Chris Lattnerc64daab2010-01-26 05:02:42 +0000511 virtual const MCExpr *
512 LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
513 const MachineBasicBlock *MBB, unsigned uid,
514 MCContext &Ctx) const;
Michael J. Spencer6e56b182010-10-20 23:40:27 +0000515
Evan Chengcc415862007-11-09 01:32:10 +0000516 /// getPICJumpTableRelocaBase - Returns relocation base for the given PIC
517 /// jumptable.
Chris Lattnerc64daab2010-01-26 05:02:42 +0000518 virtual SDValue getPICJumpTableRelocBase(SDValue Table,
519 SelectionDAG &DAG) const;
Chris Lattner589c6f62010-01-26 06:28:43 +0000520 virtual const MCExpr *
521 getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
522 unsigned JTI, MCContext &Ctx) const;
Michael J. Spencer6e56b182010-10-20 23:40:27 +0000523
Chris Lattner54e3efd2007-02-26 04:01:25 +0000524 /// getStackPtrReg - Return the stack pointer register we are using: either
525 /// ESP or RSP.
526 unsigned getStackPtrReg() const { return X86StackPtr; }
Evan Cheng29286502008-01-23 23:17:41 +0000527
528 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
529 /// function arguments in the caller parameter area. For X86, aggregates
530 /// that contains are placed at 16-byte boundaries while the rest are at
531 /// 4-byte boundaries.
Chris Lattnerdb125cf2011-07-18 04:54:35 +0000532 virtual unsigned getByValTypeAlignment(Type *Ty) const;
Evan Chengf0df0312008-05-15 08:39:06 +0000533
534 /// getOptimalMemOpType - Returns the target specific optimal type for load
Evan Chengf28f8bc2010-04-02 19:36:14 +0000535 /// and store operations as a result of memset, memcpy, and memmove
536 /// lowering. If DstAlign is zero that means it's safe to destination
537 /// alignment can satisfy any constraint. Similarly if SrcAlign is zero it
538 /// means there isn't a need to check it against alignment requirement,
539 /// probably because the source does not need to be loaded. If
Lang Hames15701f82011-10-26 23:50:43 +0000540 /// 'IsZeroVal' is true, that means it's safe to return a
Evan Chengf28f8bc2010-04-02 19:36:14 +0000541 /// non-scalar-integer type, e.g. empty string source, constant, or loaded
Evan Chengc3b0c342010-04-08 07:37:57 +0000542 /// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
543 /// constant so it does not need to be loaded.
Dan Gohman37f32ee2010-04-16 20:11:05 +0000544 /// It returns EVT::Other if the type should be determined using generic
545 /// target-independent logic.
Evan Chengf28f8bc2010-04-02 19:36:14 +0000546 virtual EVT
Evan Chengc3b0c342010-04-08 07:37:57 +0000547 getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
Lang Hames15701f82011-10-26 23:50:43 +0000548 bool IsZeroVal, bool MemcpyStrSrc,
Dan Gohman37f32ee2010-04-16 20:11:05 +0000549 MachineFunction &MF) const;
Bill Wendlingaf566342009-08-15 21:21:19 +0000550
551 /// allowsUnalignedMemoryAccesses - Returns true if the target allows
552 /// unaligned memory accesses. of the specified type.
553 virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
554 return true;
555 }
Bill Wendling20c568f2009-06-30 22:38:32 +0000556
    /// LowerOperation - Provide custom lowering hooks for some operations.
    ///
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG) const;


    /// PerformDAGCombine - Run x86-specific DAG-combine patterns on node N.
    /// Returns the replacement value, or a null SDValue to leave N unchanged.
    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    /// isTypeDesirableForOp - Return true if the target has native support for
    /// the specified value type and it is 'desirable' to use the type for the
    /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
    /// instruction encodings are longer and some i16 instructions are slow.
    virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const;

    /// IsDesirableToPromoteOp - Return true if it is profitable to promote the
    /// type of Op; if so, PVT is set to the type it should be promoted to.
    /// e.g. On x86 it is usually profitable to promote i16 operations to i32,
    /// since i16 instruction encodings are longer and some i16 instructions
    /// are slow.
    virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const;

    /// EmitInstrWithCustomInserter - Expand a pseudo instruction (one marked
    /// 'usesCustomInserter') into real machine code within MBB, returning the
    /// basic block where insertion should continue.
    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;


    /// getTargetNodeName - This method returns the name of a target specific
    /// DAG node.
    virtual const char *getTargetNodeName(unsigned Opcode) const;
590
    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    virtual EVT getSetCCResultType(EVT VT) const;

    /// computeMaskedBitsForTargetNode - Determine which of the bits specified
    /// in Mask are known to be either zero or one and return them in the
    /// KnownZero/KnownOne bitsets.
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth = 0) const;

    /// ComputeNumSignBitsForTargetNode - Determine the number of bits in the
    /// operation that are sign bits.
    virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                     unsigned Depth) const;

    /// isGAPlusOffset - Returns true if N is an address that can be
    /// decomposed as a GlobalAddress plus a constant offset; on success GA
    /// and Offset receive the pieces.
    virtual bool
    isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

    /// getReturnAddressFrameIndex - Return an SDValue for the frame index
    /// holding the return address.
    // NOTE(review): presumably consumed by RETURNADDR lowering — confirm in
    // X86ISelLowering.cpp.
    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;

    /// ExpandInlineAsm - Give the target a chance to replace the inline asm
    /// call CI with equivalent LLVM IR (TargetLowering hook).
    virtual bool ExpandInlineAsm(CallInst *CI) const;

    /// getConstraintType - Classify the given inline asm constraint string
    /// (e.g. "r", "m", "{eax}") into a generic ConstraintType.
    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    /// LowerXConstraint - Map the "X" (anything-goes) inline asm constraint
    /// onto a concrete constraint string for the given operand type.
    virtual const char *LowerXConstraint(EVT ConstraintVT) const;
Dale Johannesenba2a0b92008-01-29 02:21:21 +0000624
    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. Constraint is the
    /// inline asm constraint being processed; it is passed by reference so
    /// the implementation may rewrite it.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    /// getRegForInlineAsmConstraint - Given a physical register constraint
    /// (e.g. {edx}), return the register number and the register class for the
    /// register. This should only be used for C_Register constraints. On
    /// error, this returns a register number of 0.
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
Chris Lattnerc9addb72007-03-30 23:15:24 +0000645
    /// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
    /// register EAX to i16 by referencing its sub-register AX.
    virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
    virtual bool isTruncateFree(EVT VT1, EVT VT2) const;

    /// isZExtFree - Return true if any actual instruction that defines a
    /// value of type Ty1 implicit zero-extends the value to Ty2 in the result
    /// register. This does not necessarily include registers defined in
    /// unknown ways, such as incoming arguments, or copies from unknown
    /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
    /// does not necessarily apply to truncate instructions. e.g. on x86-64,
    /// all instructions that define 32-bit values implicit zero-extend the
    /// result out to 64 bits.
    virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
    virtual bool isZExtFree(EVT VT1, EVT VT2) const;

    /// isNarrowingProfitable - Return true if it's profitable to narrow
    /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
    /// from i32 to i8 but not from i32 to i16.
    virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

    /// isShuffleMaskLegal - Targets can use this to indicate that they only
    /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
    /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
    /// values are assumed to be legal.
    virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
                                    EVT VT) const;

    /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can
    /// use this to indicate if there is a suitable VECTOR_SHUFFLE that can
    /// be used to replace a VAND with a constant pool entry.
    virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                        EVT VT) const;
Evan Cheng6fd599f2008-03-05 01:30:59 +0000686
687 /// ShouldShrinkFPConstant - If true, then instruction selection should
688 /// seek to shrink the FP constant of the specified type to a smaller type
689 /// in order to save space and / or reduce runtime.
Owen Andersone50ed302009-08-10 22:56:29 +0000690 virtual bool ShouldShrinkFPConstant(EVT VT) const {
Evan Cheng6fd599f2008-03-05 01:30:59 +0000691 // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
692 // expensive than a straight movsd. On the other hand, it's important to
693 // shrink long double fp constant since fldt is very slow.
Owen Anderson825b72b2009-08-11 20:47:22 +0000694 return !X86ScalarSSEf64 || VT == MVT::f80;
Evan Cheng6fd599f2008-03-05 01:30:59 +0000695 }
Michael J. Spencer6e56b182010-10-20 23:40:27 +0000696
    /// getSubtarget - Return the X86 subtarget this lowering object was
    /// configured for, so callers can tailor decisions to the target CPU.
    const X86Subtarget* getSubtarget() const {
      return Subtarget;
    }
700
Chris Lattner3d661852008-01-18 06:52:41 +0000701 /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
702 /// computed in an SSE register, not on the X87 floating point stack.
Owen Andersone50ed302009-08-10 22:56:29 +0000703 bool isScalarFPTypeInSSEReg(EVT VT) const {
Owen Anderson825b72b2009-08-11 20:47:22 +0000704 return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
705 (VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1
Chris Lattner3d661852008-01-18 06:52:41 +0000706 }
Dan Gohmand9f3c482008-08-19 21:32:53 +0000707
708 /// createFastISel - This method returns a target specific FastISel object,
709 /// or null if the target does not support "fast" ISel.
Dan Gohmana4160c32010-07-07 16:29:44 +0000710 virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;
Bill Wendling20c568f2009-06-30 22:38:32 +0000711
Eric Christopherf7a0c7b2010-07-06 05:18:56 +0000712 /// getStackCookieLocation - Return true if the target stores stack
713 /// protector cookies at a fixed offset in some non-standard address
714 /// space, and populates the address space and offset as
715 /// appropriate.
716 virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const;
717
Stuart Hastingsf99a4b82011-06-06 23:15:58 +0000718 SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
719 SelectionDAG &DAG) const;
720
Evan Chengdee81012010-07-26 21:50:05 +0000721 protected:
722 std::pair<const TargetRegisterClass*, uint8_t>
723 findRepresentativeClass(EVT VT) const;
724
  private:
    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// RegInfo - X86 register information for the current target.
    const X86RegisterInfo *RegInfo;

    /// TD - Target data (type sizes / alignments) for the current target.
    const TargetData *TD;

    /// X86StackPtr - X86 physical register used as stack ptr.
    unsigned X86StackPtr;

    /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
    /// floating point ops.
    /// When SSE is available, use it for f32 operations.
    /// When SSE2 is available, use it for f64 operations.
    bool X86ScalarSSEf32;
    bool X86ScalarSSEf64;

    /// LegalFPImmediates - A list of legal fp immediates.
    std::vector<APFloat> LegalFPImmediates;
744
    /// addLegalFPImmediate - Indicate that this x86 target can instruction
    /// select the specified FP immediate natively.
    void addLegalFPImmediate(const APFloat& Imm) {
      // Appends to LegalFPImmediates; presumably consulted by isFPImmLegal.
      LegalFPImmediates.push_back(Imm);
    }
750
    /// LowerCallResult - Copy the values returned by a call, described by
    /// Ins, out of their physical locations and append them to InVals.
    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    /// LowerMemArgument - Lower formal argument i that was assigned a stack
    /// location (described by VA) rather than a register.
    SDValue LowerMemArgument(SDValue Chain,
                             CallingConv::ID CallConv,
                             const SmallVectorImpl<ISD::InputArg> &ArgInfo,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA, MachineFrameInfo *MFI,
                             unsigned i) const;
    /// LowerMemOpCallTo - Store outgoing call argument Arg to the stack
    /// location described by VA, relative to StackPtr.
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;

    // Call lowering helpers.

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;
    /// IsCalleePop - Return true if the callee pops its own stack arguments
    /// for the given calling convention / variadic-ness.
    bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
    /// EmitTailCallLoadRetAddr - Load the return address for tail-call
    /// lowering into OutRetAddr. FPDiff is the caller/callee frame-size
    /// delta. NOTE(review): confirm FPDiff semantics in X86ISelLowering.cpp.
    SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                    SDValue Chain, bool IsTailCall, bool Is64Bit,
                                    int FPDiff, DebugLoc dl) const;

    /// GetAlignedArgumentStackSize - Round StackSize up to the alignment the
    /// outgoing-argument area requires.
    unsigned GetAlignedArgumentStackSize(unsigned StackSize,
                                         SelectionDAG &DAG) const;

    /// FP_TO_INTHelper - Shared lowering for FP_TO_SINT / FP_TO_UINT
    /// (selected by isSigned); returns a pair of SDValues.
    // NOTE(review): confirm which pair member is value vs. chain in the .cpp.
    std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                               bool isSigned) const;
Evan Chengc3630942009-12-09 21:00:30 +0000791
    // Per-operation custom lowering routines. Each lowers the corresponding
    // ISD node to an x86-specific DAG (dispatched from LowerOperation).
    SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
                                   SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
                               int64_t Offset, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBITCAST(SDValue op, SelectionDAG &DAG) const;
    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) const;
    /// LowerToBT - Try to lower an And-with-setcc pattern to an x86 BT (bit
    /// test) node.
    SDValue LowerToBT(SDValue And, ISD::CondCode CC,
                      DebugLoc dl, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerADD(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;

    // Atomic and memory-ordering related lowerings.
    SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;

    // Utility functions to help LowerVECTOR_SHUFFLE
    SDValue LowerVECTOR_SHUFFLEv8i16(SDValue Op, SelectionDAG &DAG) const;
860
    // TargetLowering call interface: materialize incoming formal arguments,
    // lower outgoing calls, and lower function returns.
    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;
    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    /// isUsedByReturnOnly - Return true if the result of node N is used only
    /// by a return node.
    virtual bool isUsedByReturnOnly(SDNode *N) const;

    /// mayBeEmittedAsTailCall - Return true if the call CI may be lowered as
    /// a tail call.
    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;

    /// getTypeForExtArgOrReturn - Return the type a value of type VT should
    /// be extended to when passed or returned with extension ExtendKind.
    virtual EVT
    getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
                             ISD::NodeType ExtendKind) const;

    /// CanLowerReturn - Return true if the return values described by Outs
    /// can be lowered by LowerReturn for this calling convention.
    virtual bool
    CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                   bool isVarArg,
                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                   LLVMContext &Context) const;

    /// ReplaceATOMIC_BINARY_64 - Expand the 64-bit atomic binary-op node N
    /// into the x86-specific node NewOp, appending replacement values to
    /// Results.
    void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                 SelectionDAG &DAG, unsigned NewOp) const;
Duncan Sands1607f052008-12-01 11:39:25 +0000899
    /// Utility function to emit string processing sse4.2 instructions
    /// that return in xmm0.
    /// This takes the instruction to expand, the associated machine basic
    /// block, the number of args, and whether or not the second arg is
    /// in memory or not.
    MachineBasicBlock *EmitPCMP(MachineInstr *BInstr, MachineBasicBlock *BB,
                                unsigned argNum, bool inMem) const;

    /// Utility functions to emit monitor and mwait instructions. These
    /// need to make sure that the arguments to the intrinsic are in the
    /// correct registers.
    MachineBasicBlock *EmitMonitor(MachineInstr *MI,
                                   MachineBasicBlock *BB) const;
    MachineBasicBlock *EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const;

    /// Utility function to emit atomic bitwise operations (and, or, xor).
    /// It takes the bitwise instruction to expand, the associated machine basic
    /// block, and the associated X86 opcodes for reg/reg and reg/imm.
    MachineBasicBlock *EmitAtomicBitwiseWithCustomInserter(
                                                    MachineInstr *BInstr,
                                                    MachineBasicBlock *BB,
                                                    unsigned regOpc,
                                                    unsigned immOpc,
                                                    unsigned loadOpc,
                                                    unsigned cxchgOpc,
                                                    unsigned notOpc,
                                                    unsigned EAXreg,
                                                    TargetRegisterClass *RC,
                                                    bool invSrc = false) const;

    /// EmitAtomicBit6432WithCustomInserter - Like the bitwise emitter above,
    /// but for 64-bit atomics expanded as low/high 32-bit halves (separate
    /// reg/imm opcodes for each half).
    MachineBasicBlock *EmitAtomicBit6432WithCustomInserter(
                                                    MachineInstr *BInstr,
                                                    MachineBasicBlock *BB,
                                                    unsigned regOpcL,
                                                    unsigned regOpcH,
                                                    unsigned immOpcL,
                                                    unsigned immOpcH,
                                                    bool invSrc = false) const;

    /// Utility function to emit atomic min and max. It takes the min/max
    /// instruction to expand, the associated basic block, and the associated
    /// cmov opcode for moving the min or max value.
    MachineBasicBlock *EmitAtomicMinMaxWithCustomInserter(MachineInstr *BInstr,
                                                          MachineBasicBlock *BB,
                                                        unsigned cmovOpc) const;

    /// Utility function to emit the low-level va_arg code for X86-64.
    MachineBasicBlock *EmitVAARG64WithCustomInserter(
                       MachineInstr *MI,
                       MachineBasicBlock *MBB) const;

    /// Utility function to emit the xmm reg save portion of va_start.
    MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
                                                   MachineInstr *BInstr,
                                                   MachineBasicBlock *BB) const;
955
    // Custom-inserter helpers: each expands one pseudo instruction within
    // its machine basic block and returns the block where insertion should
    // continue (called from EmitInstrWithCustomInserter).
    MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
                                         MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB,
                                            bool Is64Bit) const;

    MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *emitLoweredTLSAddr(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    /// Emit nodes that will be selected as "test Op0,Op0", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;

    /// Emit nodes that will be selected as "cmp Op0,Op1", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                    SelectionDAG &DAG) const;
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +0000980 };
Evan Chengc3f44b02008-09-03 00:03:49 +0000981
  namespace X86 {
    /// createFastISel - Construct the x86 FastISel instance
    /// (see X86TargetLowering::createFastISel).
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
  }
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +0000985}
986
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +0000987#endif // X86ISELLOWERING_H