blob: 419da3742cf81097cf66b4a3566a8bf63c166697 [file] [log] [blame]
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00001//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
Chris Lattner4ee451d2007-12-29 20:36:04 +00005// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +00007//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the interfaces that X86 uses to lower LLVM code into a
11// selection DAG.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef X86ISELLOWERING_H
16#define X86ISELLOWERING_H
17
Evan Cheng559806f2006-01-27 08:10:46 +000018#include "X86Subtarget.h"
Anton Korobeynikov2365f512007-07-14 14:06:15 +000019#include "X86RegisterInfo.h"
Gordon Henriksen86737662008-01-05 16:56:59 +000020#include "X86MachineFunctionInfo.h"
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +000021#include "llvm/Target/TargetLowering.h"
Evan Chengddc419c2010-01-26 19:04:47 +000022#include "llvm/Target/TargetOptions.h"
Ted Kremenekb388eb82008-09-03 02:54:11 +000023#include "llvm/CodeGen/FastISel.h"
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +000024#include "llvm/CodeGen/SelectionDAG.h"
Rafael Espindola1b5dcc32007-08-31 15:06:30 +000025#include "llvm/CodeGen/CallingConvLower.h"
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +000026
27namespace llvm {
  namespace X86ISD {
    // X86 Specific DAG Nodes.  Enumerator order is significant: opcode values
    // are assigned sequentially from the two anchors below, so do not reorder.
    enum NodeType {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// BSF - Bit scan forward.
      /// BSR - Bit scan reverse.
      BSF,
      BSR,

      /// SHLD, SHRD - Double shift instructions. These correspond to
      /// X86::SHLDxx and X86::SHRDxx instructions.
      SHLD,
      SHRD,

      /// FAND - Bitwise logical AND of floating point values. This corresponds
      /// to X86::ANDPS or X86::ANDPD.
      FAND,

      /// FOR - Bitwise logical OR of floating point values. This corresponds
      /// to X86::ORPS or X86::ORPD.
      FOR,

      /// FXOR - Bitwise logical XOR of floating point values. This corresponds
      /// to X86::XORPS or X86::XORPD.
      FXOR,

      /// FSRL - Bitwise logical right shift of floating point values. This
      /// corresponds to X86::PSRLDQ.
      FSRL,

      /// CALL - These operations represent an abstract X86 call
      /// instruction, which includes a bunch of information.  In particular the
      /// operands of these node are:
      ///
      ///     #0 - The incoming token chain
      ///     #1 - The callee
      ///     #2 - The number of arg bytes the caller pushes on the stack.
      ///     #3 - The number of arg bytes the callee pops off the stack.
      ///     #4 - The value to pass in AL/AX/EAX (optional)
      ///     #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      ///     #0 - The outgoing token chain
      ///     #1 - The first register result value (optional)
      ///     #2 - The second register result value (optional)
      ///
      CALL,

      /// RDTSC_DAG - This operation implements the lowering for
      /// readcyclecounter
      RDTSC_DAG,

      /// X86 compare and logical compare instructions.
      CMP, COMI, UCOMI,

      /// X86 bit-test instructions.
      BT,

      /// X86 SetCC. Operand 0 is condition code, and operand 1 is the EFLAGS
      /// operand, usually produced by a CMP instruction.
      SETCC,

      // Same as SETCC except it's materialized with a sbb and the value is all
      // one's or all zero's.
      SETCC_CARRY,  // R = carry_bit ? ~0 : 0

      /// X86 conditional moves. Operand 0 and operand 1 are the two values
      /// to select from. Operand 2 is the condition code, and operand 3 is the
      /// flag operand produced by a CMP or TEST instruction. It also writes a
      /// flag result.
      CMOV,

      /// X86 conditional branches. Operand 0 is the chain operand, operand 1
      /// is the block to branch if condition is true, operand 2 is the
      /// condition code, and operand 3 is the flag operand produced by a CMP
      /// or TEST instruction.
      BRCOND,

      /// Return with a flag operand. Operand 0 is the chain operand, operand
      /// 1 is the number of bytes of stack to pop.
      RET_FLAG,

      /// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx.
      REP_STOS,

      /// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.
      REP_MOVS,

      /// GlobalBaseReg - On Darwin, this node represents the result of the popl
      /// at function entry, used for PIC code.
      GlobalBaseReg,

      /// Wrapper - A wrapper node for TargetConstantPool,
      /// TargetExternalSymbol, and TargetGlobalAddress.
      Wrapper,

      /// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP
      /// relative displacements.
      WrapperRIP,

      /// MOVQ2DQ - Copies a 64-bit value from an MMX vector to the low word
      /// of an XMM vector, with the high word zero filled.
      MOVQ2DQ,

      /// MOVDQ2Q - Copies a 64-bit value from the low word of an XMM vector
      /// to an MMX vector.  If you think this is too close to the previous
      /// mnemonic, so do I; blame Intel.
      MOVDQ2Q,

      /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRB.
      PEXTRB,

      /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRW.
      PEXTRW,

      /// INSERTPS - Insert any element of a 4 x float vector into any element
      /// of a destination 4 x float vector.
      INSERTPS,

      /// PINSRB - Insert the lower 8-bits of a 32-bit value to a vector,
      /// corresponds to X86::PINSRB.
      PINSRB,

      /// PINSRW - Insert the lower 16-bits of a 32-bit value to a vector,
      /// corresponds to X86::PINSRW.
      PINSRW, MMX_PINSRW,

      /// PSHUFB - Shuffle 16 8-bit values within a vector.
      PSHUFB,

      /// PANDN - and with not'd value.
      PANDN,

      /// PSIGNB/W/D - Copy integer sign.
      PSIGNB, PSIGNW, PSIGND,

      /// PBLENDVB - Variable blend
      PBLENDVB,

      /// FMAX, FMIN - Floating point max and min.
      ///
      FMAX, FMIN,

      /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
      /// approximation.  Note that these typically require refinement
      /// in order to obtain suitable precision.
      FRSQRT, FRCP,

      // TLSADDR - Thread Local Storage.
      TLSADDR,

      // TLSCALL - Thread Local Storage.  When calling to an OS provided
      // thunk at the address from an earlier relocation.
      TLSCALL,

      // EH_RETURN - Exception Handling helpers.
      EH_RETURN,

      /// TC_RETURN - Tail call return.
      ///   operand #0 chain
      ///   operand #1 callee (register or absolute)
      ///   operand #2 stack adjustment
      ///   operand #3 optional in flag
      TC_RETURN,

      // VZEXT_MOVL - Vector move low and zero extend.
      VZEXT_MOVL,

      // VSHL, VSRL - Vector logical left / right shift.
      VSHL, VSRL,

      // CMPPD, CMPPS - Vector double/float comparison.
      CMPPD, CMPPS,

      // PCMP* - Vector integer comparisons.
      PCMPEQB, PCMPEQW, PCMPEQD, PCMPEQQ,
      PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ,

      // ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
      ADD, SUB, ADC, SBB, SMUL,
      INC, DEC, OR, XOR, AND,

      UMUL, // LOW, HI, FLAGS = umul LHS, RHS

      // MUL_IMM - X86 specific multiply by immediate.
      MUL_IMM,

      // PTEST - Vector bitwise comparisons
      PTEST,

      // TESTP - Vector packed fp sign bitwise comparisons
      TESTP,

      // Several flavors of instructions with vector shuffle behaviors.
      PALIGN,
      PSHUFD,
      PSHUFHW,
      PSHUFLW,
      PSHUFHW_LD,
      PSHUFLW_LD,
      SHUFPD,
      SHUFPS,
      MOVDDUP,
      MOVSHDUP,
      MOVSLDUP,
      MOVSHDUP_LD,
      MOVSLDUP_LD,
      MOVLHPS,
      MOVLHPD,
      MOVHLPS,
      MOVHLPD,
      MOVLPS,
      MOVLPD,
      MOVSD,
      MOVSS,
      UNPCKLPS,
      UNPCKLPD,
      UNPCKHPS,
      UNPCKHPD,
      PUNPCKLBW,
      PUNPCKLWD,
      PUNPCKLDQ,
      PUNPCKLQDQ,
      PUNPCKHBW,
      PUNPCKHWD,
      PUNPCKHDQ,
      PUNPCKHQDQ,

      // VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
      // according to %al. An operator is needed so that this can be expanded
      // with control flow.
      VASTART_SAVE_XMM_REGS,

      // WIN_ALLOCA - Windows's _chkstk call to do stack probing.
      WIN_ALLOCA,

      // Memory barrier
      MEMBARRIER,
      MFENCE,
      SFENCE,
      LFENCE,

      // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
      // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
      // Atomic 64-bit binary operations.
      ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
      ATOMSUB64_DAG,
      ATOMOR64_DAG,
      ATOMXOR64_DAG,
      ATOMAND64_DAG,
      ATOMNAND64_DAG,
      ATOMSWAP64_DAG,

      // LCMPXCHG_DAG, LCMPXCHG8_DAG - Compare and swap.
      LCMPXCHG_DAG,
      LCMPXCHG8_DAG,

      // VZEXT_LOAD - Load, scalar_to_vector, and zero extend.
      VZEXT_LOAD,

      // FNSTCW16m - Store FP control world into i16 memory.
      FNSTCW16m,

      /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
      /// integer destination in memory and a FP reg source.  This corresponds
      /// to the X86::FIST*m instructions and the rounding mode change stuff. It
      /// has two inputs (token chain and address) and two outputs (int value
      /// and token chain).
      FP_TO_INT16_IN_MEM,
      FP_TO_INT32_IN_MEM,
      FP_TO_INT64_IN_MEM,

      /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
      /// integer source in memory and FP reg result.  This corresponds to the
      /// X86::FILD*m instructions. It has three inputs (token chain, address,
      /// and source type) and two outputs (FP value and token chain). FILD_FLAG
      /// also produces a flag.
      FILD,
      FILD_FLAG,

      /// FLD - This instruction implements an extending load to FP stack slots.
      /// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain
      /// operand, ptr to load from, and a ValueType node indicating the type
      /// to load to.
      FLD,

      /// FST - This instruction implements a truncating store to FP stack
      /// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
      /// chain operand, value to store, address, and a ValueType to store it
      /// as.
      FST,

      /// VAARG_64 - This instruction grabs the address of the next argument
      /// from a va_list. (reads and modifies the va_list in memory)
      VAARG_64

      // WARNING: Do not add anything in the end unless you want the node to
      // have memop! In fact, starting from ATOMADD64_DAG all opcodes will be
      // thought as target memory ops!
    };
  }
335
  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFD.
    bool isPSHUFDMask(ShuffleVectorSDNode *N);

    /// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFHW.
    bool isPSHUFHWMask(ShuffleVectorSDNode *N);

    /// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFLW.
    bool isPSHUFLWMask(ShuffleVectorSDNode *N);

    /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to SHUFP*.
    bool isSHUFPMask(ShuffleVectorSDNode *N);

    /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
    bool isMOVHLPSMask(ShuffleVectorSDNode *N);

    /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
    /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
    /// <2, 3, 2, 3>
    bool isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N);

    /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for MOVLP{S|D}.
    bool isMOVLPMask(ShuffleVectorSDNode *N);

    /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for MOVHP{S|D}
    /// as well as MOVLHPS.
    bool isMOVLHPSMask(ShuffleVectorSDNode *N);

    /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to UNPCKL.
    bool isUNPCKLMask(ShuffleVectorSDNode *N, bool V2IsSplat = false);

    /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to UNPCKH.
    bool isUNPCKHMask(ShuffleVectorSDNode *N, bool V2IsSplat = false);

    /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
    /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
    /// <0, 0, 1, 1>
    bool isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N);

    /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
    /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
    /// <2, 2, 3, 3>
    bool isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N);

    /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSS,
    /// MOVSD, and MOVD, i.e. setting the lowest element.
    bool isMOVLMask(ShuffleVectorSDNode *N);

    /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
    bool isMOVSHDUPMask(ShuffleVectorSDNode *N);

    /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
    bool isMOVSLDUPMask(ShuffleVectorSDNode *N);

    /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVDDUP.
    bool isMOVDDUPMask(ShuffleVectorSDNode *N);

    /// isPALIGNRMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PALIGNR.
    bool isPALIGNRMask(ShuffleVectorSDNode *N);

    /// isVEXTRACTF128Index - Return true if the specified
    /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
    /// suitable for input to VEXTRACTF128.
    bool isVEXTRACTF128Index(SDNode *N);

    /// isVINSERTF128Index - Return true if the specified
    /// INSERT_SUBVECTOR operand specifies a subvector insert that is
    /// suitable for input to VINSERTF128.
    bool isVINSERTF128Index(SDNode *N);

    /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
    /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
    /// instructions.
    unsigned getShuffleSHUFImmediate(SDNode *N);

    /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with PSHUFHW instruction.
    unsigned getShufflePSHUFHWImmediate(SDNode *N);

    /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with PSHUFLW instruction.
    unsigned getShufflePSHUFLWImmediate(SDNode *N);

    /// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction.
    unsigned getShufflePALIGNRImmediate(SDNode *N);

    /// getExtractVEXTRACTF128Immediate - Return the appropriate
    /// immediate to extract the specified EXTRACT_SUBVECTOR index
    /// with VEXTRACTF128 instructions.
    unsigned getExtractVEXTRACTF128Immediate(SDNode *N);

    /// getInsertVINSERTF128Immediate - Return the appropriate
    /// immediate to insert at the specified INSERT_SUBVECTOR index
    /// with VINSERTF128 instructions.
    unsigned getInsertVINSERTF128Immediate(SDNode *N);

    /// isZeroNode - Returns true if Elt is a constant zero or a floating point
    /// constant +0.0.
    bool isZeroNode(SDValue Elt);

    /// isOffsetSuitableForCodeModel - Returns true of the given offset can be
    /// fit into displacement field of the instruction.
    bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                      bool hasSymbolicDisplacement = true);
  }
457
Chris Lattner91897772006-10-18 18:26:48 +0000458 //===--------------------------------------------------------------------===//
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +0000459 // X86TargetLowering - X86 Implementation of the TargetLowering interface
460 class X86TargetLowering : public TargetLowering {
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +0000461 public:
Dan Gohmanc9f5f3f2008-05-14 01:58:56 +0000462 explicit X86TargetLowering(X86TargetMachine &TM);
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +0000463
Chris Lattnerc64daab2010-01-26 05:02:42 +0000464 virtual unsigned getJumpTableEncoding() const;
Chris Lattner5e1df8d2010-01-25 23:38:14 +0000465
Chris Lattnerc64daab2010-01-26 05:02:42 +0000466 virtual const MCExpr *
467 LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
468 const MachineBasicBlock *MBB, unsigned uid,
469 MCContext &Ctx) const;
Michael J. Spencer6e56b182010-10-20 23:40:27 +0000470
Evan Chengcc415862007-11-09 01:32:10 +0000471 /// getPICJumpTableRelocaBase - Returns relocation base for the given PIC
472 /// jumptable.
Chris Lattnerc64daab2010-01-26 05:02:42 +0000473 virtual SDValue getPICJumpTableRelocBase(SDValue Table,
474 SelectionDAG &DAG) const;
Chris Lattner589c6f62010-01-26 06:28:43 +0000475 virtual const MCExpr *
476 getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
477 unsigned JTI, MCContext &Ctx) const;
Michael J. Spencer6e56b182010-10-20 23:40:27 +0000478
Chris Lattner54e3efd2007-02-26 04:01:25 +0000479 /// getStackPtrReg - Return the stack pointer register we are using: either
480 /// ESP or RSP.
481 unsigned getStackPtrReg() const { return X86StackPtr; }
Evan Cheng29286502008-01-23 23:17:41 +0000482
483 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
484 /// function arguments in the caller parameter area. For X86, aggregates
485 /// that contains are placed at 16-byte boundaries while the rest are at
486 /// 4-byte boundaries.
487 virtual unsigned getByValTypeAlignment(const Type *Ty) const;
Evan Chengf0df0312008-05-15 08:39:06 +0000488
489 /// getOptimalMemOpType - Returns the target specific optimal type for load
Evan Chengf28f8bc2010-04-02 19:36:14 +0000490 /// and store operations as a result of memset, memcpy, and memmove
491 /// lowering. If DstAlign is zero that means it's safe to destination
492 /// alignment can satisfy any constraint. Similarly if SrcAlign is zero it
493 /// means there isn't a need to check it against alignment requirement,
494 /// probably because the source does not need to be loaded. If
495 /// 'NonScalarIntSafe' is true, that means it's safe to return a
496 /// non-scalar-integer type, e.g. empty string source, constant, or loaded
Evan Chengc3b0c342010-04-08 07:37:57 +0000497 /// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
498 /// constant so it does not need to be loaded.
Dan Gohman37f32ee2010-04-16 20:11:05 +0000499 /// It returns EVT::Other if the type should be determined using generic
500 /// target-independent logic.
Evan Chengf28f8bc2010-04-02 19:36:14 +0000501 virtual EVT
Evan Chengc3b0c342010-04-08 07:37:57 +0000502 getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
503 bool NonScalarIntSafe, bool MemcpyStrSrc,
Dan Gohman37f32ee2010-04-16 20:11:05 +0000504 MachineFunction &MF) const;
Bill Wendlingaf566342009-08-15 21:21:19 +0000505
506 /// allowsUnalignedMemoryAccesses - Returns true if the target allows
507 /// unaligned memory accesses. of the specified type.
508 virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
509 return true;
510 }
Bill Wendling20c568f2009-06-30 22:38:32 +0000511
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +0000512 /// LowerOperation - Provide custom lowering hooks for some operations.
513 ///
Dan Gohmand858e902010-04-17 15:26:15 +0000514 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +0000515
Duncan Sands1607f052008-12-01 11:39:25 +0000516 /// ReplaceNodeResults - Replace the results of node with an illegal result
517 /// type with new values built out of custom code.
Chris Lattner27a6c732007-11-24 07:07:01 +0000518 ///
Duncan Sands1607f052008-12-01 11:39:25 +0000519 virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
Dan Gohmand858e902010-04-17 15:26:15 +0000520 SelectionDAG &DAG) const;
Chris Lattner27a6c732007-11-24 07:07:01 +0000521
Michael J. Spencer6e56b182010-10-20 23:40:27 +0000522
Dan Gohman475871a2008-07-27 21:46:04 +0000523 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
Evan Cheng206ee9d2006-07-07 08:33:52 +0000524
Evan Chenge5b51ac2010-04-17 06:13:15 +0000525 /// isTypeDesirableForOp - Return true if the target has native support for
526 /// the specified value type and it is 'desirable' to use the type for the
527 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
528 /// instruction encodings are longer and some i16 instructions are slow.
529 virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const;
530
531 /// isTypeDesirable - Return true if the target has native support for the
532 /// specified value type and it is 'desirable' to use the type. e.g. On x86
533 /// i16 is legal, but undesirable since i16 instruction encodings are longer
534 /// and some i16 instructions are slow.
535 virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const;
Evan Cheng64b7bf72010-04-16 06:14:10 +0000536
Dan Gohmanaf1d8ca2010-05-01 00:01:06 +0000537 virtual MachineBasicBlock *
538 EmitInstrWithCustomInserter(MachineInstr *MI,
539 MachineBasicBlock *MBB) const;
Evan Cheng4a460802006-01-11 00:33:36 +0000540
Michael J. Spencer6e56b182010-10-20 23:40:27 +0000541
Evan Cheng72261582005-12-20 06:22:03 +0000542 /// getTargetNodeName - This method returns the name of a target specific
543 /// DAG node.
544 virtual const char *getTargetNodeName(unsigned Opcode) const;
545
Scott Michel5b8f82e2008-03-10 15:42:14 +0000546 /// getSetCCResultType - Return the ISD::SETCC ValueType
Owen Anderson825b72b2009-08-11 20:47:22 +0000547 virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;
Scott Michel5b8f82e2008-03-10 15:42:14 +0000548
    /// computeMaskedBitsForTargetNode - Determine which of the bits specified
    /// in Mask are known to be either zero or one and return them in the
    /// KnownZero/KnownOne bitsets.
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth = 0) const;

    // ComputeNumSignBitsForTargetNode - Determine the number of bits in the
    // operation that are sign bits.
    virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                     unsigned Depth) const;

    /// isGAPlusOffset - Return true if N is a node that wraps a global value
    /// plus a constant offset, returning the global in GA and the offset in
    /// Offset.
    virtual bool
    isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

    /// getReturnAddressFrameIndex - Return a frame index node referring to the
    /// slot holding the return address.
    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;

    /// ExpandInlineAsm - Return true if the inline asm call CI was replaced
    /// with equivalent LLVM IR.
    virtual bool ExpandInlineAsm(CallInst *CI) const;

    /// getConstraintType - Classify the given inline asm constraint string
    /// (register, register class, memory, etc.).
    ConstraintType getConstraintType(const std::string &Constraint) const;
Michael J. Spencer6e56b182010-10-20 23:40:27 +0000572
    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    /// getRegClassForInlineAsmConstraint - Return the set of registers that
    /// may satisfy the given inline asm constraint for a value of type VT.
    std::vector<unsigned>
    getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                      EVT VT) const;

    /// LowerXConstraint - Resolve the target-specific 'X' (anything)
    /// constraint to a more concrete constraint letter for the given type.
    virtual const char *LowerXConstraint(EVT ConstraintVT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector for the given single-letter constraint. If the operand is
    /// invalid for the constraint, don't add anything to Ops.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              char ConstraintLetter,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    /// getRegForInlineAsmConstraint - Given a physical register constraint
    /// (e.g. {edx}), return the register number and the register class for the
    /// register. This should only be used for C_Register constraints. On
    /// error, this returns a register number of 0.
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;
Michael J. Spencer6e56b182010-10-20 23:40:27 +0000600
    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;

    /// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
    /// register EAX to i16 by referencing its sub-register AX.
    virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const;
    virtual bool isTruncateFree(EVT VT1, EVT VT2) const;

    /// isZExtFree - Return true if any actual instruction that defines a
    /// value of type Ty1 implicit zero-extends the value to Ty2 in the result
    /// register. This does not necessarily include registers defined in
    /// unknown ways, such as incoming arguments, or copies from unknown
    /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
    /// does not necessarily apply to truncate instructions. e.g. on x86-64,
    /// all instructions that define 32-bit values implicit zero-extend the
    /// result out to 64 bits.
    virtual bool isZExtFree(const Type *Ty1, const Type *Ty2) const;
    virtual bool isZExtFree(EVT VT1, EVT VT2) const;

    /// isNarrowingProfitable - Return true if it's profitable to narrow
    /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
    /// from i32 to i8 but not from i32 to i16.
    virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
Evan Chengeb2f9692009-10-27 19:56:55 +0000631
    /// isShuffleMaskLegal - Targets can use this to indicate that they only
    /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
    /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
    /// values are assumed to be legal.
    virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
                                    EVT VT) const;

    /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can use
    /// this to indicate whether there is a suitable VECTOR_SHUFFLE that can be
    /// used to replace a VAND with a constant pool entry.
    virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                        EVT VT) const;
Evan Cheng6fd599f2008-03-05 01:30:59 +0000645
646 /// ShouldShrinkFPConstant - If true, then instruction selection should
647 /// seek to shrink the FP constant of the specified type to a smaller type
648 /// in order to save space and / or reduce runtime.
Owen Andersone50ed302009-08-10 22:56:29 +0000649 virtual bool ShouldShrinkFPConstant(EVT VT) const {
Evan Cheng6fd599f2008-03-05 01:30:59 +0000650 // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
651 // expensive than a straight movsd. On the other hand, it's important to
652 // shrink long double fp constant since fldt is very slow.
Owen Anderson825b72b2009-08-11 20:47:22 +0000653 return !X86ScalarSSEf64 || VT == MVT::f80;
Evan Cheng6fd599f2008-03-05 01:30:59 +0000654 }
Michael J. Spencer6e56b182010-10-20 23:40:27 +0000655
    /// getSubtarget - Return the X86 subtarget this lowering object was
    /// configured for.
    const X86Subtarget* getSubtarget() const {
      return Subtarget;
    }
659
Chris Lattner3d661852008-01-18 06:52:41 +0000660 /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
661 /// computed in an SSE register, not on the X87 floating point stack.
Owen Andersone50ed302009-08-10 22:56:29 +0000662 bool isScalarFPTypeInSSEReg(EVT VT) const {
Owen Anderson825b72b2009-08-11 20:47:22 +0000663 return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
664 (VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1
Chris Lattner3d661852008-01-18 06:52:41 +0000665 }
Dan Gohmand9f3c482008-08-19 21:32:53 +0000666
    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;

    /// getFunctionAlignment - Return the Log2 alignment of this function.
    virtual unsigned getFunctionAlignment(const Function *F) const;

    /// getRegPressureLimit - Return the register pressure limit for the given
    /// register class in the given function.
    unsigned getRegPressureLimit(const TargetRegisterClass *RC,
                                 MachineFunction &MF) const;

    /// getStackCookieLocation - Return true if the target stores stack
    /// protector cookies at a fixed offset in some non-standard address
    /// space, and populates the address space and offset as
    /// appropriate.
    virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const;

  protected:
    /// findRepresentativeClass - Return the largest legal super-register
    /// class for VT together with its register-pressure "cost" weight.
    std::pair<const TargetRegisterClass*, uint8_t>
      findRepresentativeClass(EVT VT) const;
686
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +0000687 private:
Evan Cheng0db9fe62006-04-25 20:13:52 +0000688 /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
689 /// make the right decision when generating code for different targets.
690 const X86Subtarget *Subtarget;
Dan Gohmanc9f5f3f2008-05-14 01:58:56 +0000691 const X86RegisterInfo *RegInfo;
Anton Korobeynikovbff66b02008-09-09 18:22:57 +0000692 const TargetData *TD;
Evan Cheng0db9fe62006-04-25 20:13:52 +0000693
Evan Cheng25ab6902006-09-08 06:48:29 +0000694 /// X86StackPtr - X86 physical register used as stack ptr.
695 unsigned X86StackPtr;
Michael J. Spencer6e56b182010-10-20 23:40:27 +0000696
697 /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
Dale Johannesenf1fc3a82007-09-23 14:52:20 +0000698 /// floating point ops.
699 /// When SSE is available, use it for f32 operations.
700 /// When SSE2 is available, use it for f64 operations.
701 bool X86ScalarSSEf32;
702 bool X86ScalarSSEf64;
Evan Cheng0d9e9762008-01-29 19:34:22 +0000703
Evan Chengeb2f9692009-10-27 19:56:55 +0000704 /// LegalFPImmediates - A list of legal fp immediates.
705 std::vector<APFloat> LegalFPImmediates;
706
707 /// addLegalFPImmediate - Indicate that this x86 target can instruction
708 /// select the specified FP immediate natively.
709 void addLegalFPImmediate(const APFloat& Imm) {
710 LegalFPImmediates.push_back(Imm);
711 }
712
    /// LowerCallResult - Copy the values returned by a call out of the
    /// physical registers / memory specified by the calling convention into
    /// InVals.
    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    /// LowerMemArgument - Lower the i'th incoming argument that was assigned
    /// to a stack slot (per VA) into a value usable in the DAG.
    SDValue LowerMemArgument(SDValue Chain,
                             CallingConv::ID CallConv,
                             const SmallVectorImpl<ISD::InputArg> &ArgInfo,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA, MachineFrameInfo *MFI,
                             unsigned i) const;
    /// LowerMemOpCallTo - Store an outgoing call argument Arg to the stack
    /// location assigned by VA.
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;

    // Call lowering helpers.

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;
    /// IsCalleePop - Return true if the callee is responsible for popping its
    /// own arguments under the given calling convention.
    bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
    /// EmitTailCallLoadRetAddr - Load the return address into OutRetAddr so it
    /// can be restored after the stack is adjusted for a tail call.
    SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                    SDValue Chain, bool IsTailCall, bool Is64Bit,
                                    int FPDiff, DebugLoc dl) const;

    /// GetAlignedArgumentStackSize - Round StackSize up to the stack alignment
    /// required for the outgoing argument area.
    unsigned GetAlignedArgumentStackSize(unsigned StackSize,
                                         SelectionDAG &DAG) const;

    /// FP_TO_INTHelper - Shared lowering for FP_TO_SINT/FP_TO_UINT; returns
    /// the resulting chain/value pair.
    std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                               bool isSigned) const;
Evan Chengc3630942009-12-09 21:00:30 +0000753
    // Per-node lowering routines for vector and address computations
    // (presumably dispatched from the target's LowerOperation hook —
    // definitions live in the corresponding .cpp file).
    SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
                                   SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
                               int64_t Offset, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
    /// BuildFILD - Build an x87 FILD (integer-to-FP load) from the value in
    /// StackSlot.
    SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
                      SelectionDAG &DAG) const;
Wesley Peckbf17cfa2010-11-23 03:31:01 +0000776 SDValue LowerBITCAST(SDValue op, SelectionDAG &DAG) const;
    // Per-node lowering routines for scalar, FP-conversion, control-flow and
    // atomic operations (definitions live in the corresponding .cpp file).
    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    /// LowerToBT - Try to turn the given AND-with-condition into an x86 BT
    /// (bit test) node.
    SDValue LowerToBT(SDValue And, ISD::CondCode CC,
                      DebugLoc dl, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSHL(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const;

    // Utility functions to help LowerVECTOR_SHUFFLE.
    SDValue LowerVECTOR_SHUFFLEv8i16(SDValue Op, SelectionDAG &DAG) const;
818
    /// LowerFormalArguments - Lower the incoming (formal) arguments of the
    /// current function, described by Ins, into the DAG, placing the resulting
    /// values in InVals.
    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;
    /// LowerCall - Lower an outgoing call, placing the call's results in
    /// InVals. isTailCall may be cleared if the call cannot actually be
    /// performed as a tail call.
    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals) const;

    /// LowerReturn - Lower the return values of the current function,
    /// described by Outs/OutVals, into the DAG.
    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    /// isUsedByReturnOnly - Return true if the result of node N is used only
    /// by a return node.
    virtual bool isUsedByReturnOnly(SDNode *N) const;

    /// CanLowerReturn - Return true if the given return values can be lowered
    /// under the given calling convention.
    virtual bool
      CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                     LLVMContext &Context) const;

    /// ReplaceATOMIC_BINARY_64 - Replace the 64-bit atomic binary node N with
    /// the target node of opcode NewOp, appending the replacement values to
    /// Results.
    void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                 SelectionDAG &DAG, unsigned NewOp) const;
Duncan Sands1607f052008-12-01 11:39:25 +0000850
    /// Utility function to emit string processing sse4.2 instructions
    /// that return in xmm0.
    /// This takes the instruction to expand, the associated machine basic
    /// block, the number of args, and whether or not the second arg is
    /// in memory or not.
    MachineBasicBlock *EmitPCMP(MachineInstr *BInstr, MachineBasicBlock *BB,
                                unsigned argNum, bool inMem) const;

    /// Utility functions to emit monitor and mwait instructions. These
    /// need to make sure that the arguments to the intrinsic are in the
    /// correct registers.
    MachineBasicBlock *EmitMonitor(MachineInstr *MI,
                                   MachineBasicBlock *BB) const;
    MachineBasicBlock *EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const;

    /// Utility function to emit atomic bitwise operations (and, or, xor).
    /// It takes the bitwise instruction to expand, the associated machine basic
    /// block, and the associated X86 opcodes for reg/reg and reg/imm.
    MachineBasicBlock *EmitAtomicBitwiseWithCustomInserter(
                                                    MachineInstr *BInstr,
                                                    MachineBasicBlock *BB,
                                                    unsigned regOpc,
                                                    unsigned immOpc,
                                                    unsigned loadOpc,
                                                    unsigned cxchgOpc,
                                                    unsigned notOpc,
                                                    unsigned EAXreg,
                                                    TargetRegisterClass *RC,
                                                    bool invSrc = false) const;

    /// EmitAtomicBit6432WithCustomInserter - Like the above, but expands a
    /// 64-bit atomic bitwise operation on a 32-bit target, taking separate
    /// opcodes for the low (L) and high (H) halves.
    MachineBasicBlock *EmitAtomicBit6432WithCustomInserter(
                                                    MachineInstr *BInstr,
                                                    MachineBasicBlock *BB,
                                                    unsigned regOpcL,
                                                    unsigned regOpcH,
                                                    unsigned immOpcL,
                                                    unsigned immOpcH,
                                                    bool invSrc = false) const;

    /// Utility function to emit atomic min and max. It takes the min/max
    /// instruction to expand, the associated basic block, and the associated
    /// cmov opcode for moving the min or max value.
    MachineBasicBlock *EmitAtomicMinMaxWithCustomInserter(MachineInstr *BInstr,
                                                          MachineBasicBlock *BB,
                                                        unsigned cmovOpc) const;

    // Utility function to emit the low-level va_arg code for X86-64.
    MachineBasicBlock *EmitVAARG64WithCustomInserter(
                       MachineInstr *MI,
                       MachineBasicBlock *MBB) const;

    /// Utility function to emit the xmm reg save portion of va_start.
    MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
                                                   MachineInstr *BInstr,
                                                   MachineBasicBlock *BB) const;

    /// EmitLoweredSelect - Expand a pseudo select instruction into control
    /// flow.
    MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
                                         MachineBasicBlock *BB) const;

    /// EmitLoweredWinAlloca - Expand the Windows dynamic stack allocation
    /// pseudo instruction.
    MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB) const;

    /// EmitLoweredTLSCall - Expand the TLS call pseudo instruction.
    MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    /// emitLoweredTLSAddr - Expand the TLS address pseudo instruction.
    // NOTE(review): lowercase 'emit' is inconsistent with the sibling
    // EmitLowered* helpers; renaming would require touching the out-of-view
    // definition, so it is only flagged here.
    MachineBasicBlock *emitLoweredTLSAddr(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;
918
    /// EmitTest - Emit nodes that will be selected as "test Op0,Op0", or
    /// something equivalent, for use with the given x86 condition code.
    SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;

    /// EmitCmp - Emit nodes that will be selected as "cmp Op0,Op1", or
    /// something equivalent, for use with the given x86 condition code.
    SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                    SelectionDAG &DAG) const;
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +0000927 };
Evan Chengc3f44b02008-09-03 00:03:49 +0000928
  namespace X86 {
    /// createFastISel - Create an X86-specific FastISel instance operating on
    /// the given per-function lowering state.
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
  }
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +0000932}
933
Chris Lattnerdbdbf0c2005-11-15 00:40:23 +0000934#endif // X86ISELLOWERING_H