//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef X86ISELLOWERING_H
#define X86ISELLOWERING_H

#include "X86Subtarget.h"
#include "X86RegisterInfo.h"
#include "X86MachineFunctionInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"

namespace llvm {
  namespace X86ISD {
    // X86 Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// BSF - Bit scan forward.
      /// BSR - Bit scan reverse.
      BSF,
      BSR,

      /// SHLD, SHRD - Double shift instructions. These correspond to
      /// X86::SHLDxx and X86::SHRDxx instructions.
      SHLD,
      SHRD,

      /// FAND - Bitwise logical AND of floating point values. This corresponds
      /// to X86::ANDPS or X86::ANDPD.
      FAND,

      /// FOR - Bitwise logical OR of floating point values. This corresponds
      /// to X86::ORPS or X86::ORPD.
      FOR,

      /// FXOR - Bitwise logical XOR of floating point values. This corresponds
      /// to X86::XORPS or X86::XORPD.
      FXOR,

      /// FSRL - Bitwise logical right shift of floating point values. This
      /// corresponds to X86::PSRLDQ.
      FSRL,

      /// CALL - These operations represent an abstract X86 call instruction,
      /// which includes a bunch of information. In particular the operands of
      /// these nodes are:
      ///
      ///     #0 - The incoming token chain
      ///     #1 - The callee
      ///     #2 - The number of arg bytes the caller pushes on the stack.
      ///     #3 - The number of arg bytes the callee pops off the stack.
      ///     #4 - The value to pass in AL/AX/EAX (optional)
      ///     #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      ///     #0 - The outgoing token chain
      ///     #1 - The first register result value (optional)
      ///     #2 - The second register result value (optional)
      ///
      CALL,

      /// RDTSC_DAG - This operation implements the lowering for
      /// readcyclecounter.
      RDTSC_DAG,

      /// X86 compare and logical compare instructions.
      CMP, COMI, UCOMI,

      /// X86 bit-test instructions.
      BT,

      /// X86 SetCC. Operand 0 is the condition code, and operand 1 is the
      /// EFLAGS operand, usually produced by a CMP instruction.
      SETCC,

      // Same as SETCC except it's materialized with an sbb and the value is
      // all ones or all zeros.
      SETCC_CARRY,  // R = carry_bit ? ~0 : 0

      /// X86 conditional moves. Operand 0 and operand 1 are the two values
      /// to select from. Operand 2 is the condition code, and operand 3 is the
      /// flag operand produced by a CMP or TEST instruction. It also writes a
      /// flag result.
      CMOV,

      /// X86 conditional branches. Operand 0 is the chain operand, operand 1
      /// is the block to branch to if the condition is true, operand 2 is the
      /// condition code, and operand 3 is the flag operand produced by a CMP
      /// or TEST instruction.
      BRCOND,

      /// Return with a flag operand. Operand 0 is the chain operand, operand
      /// 1 is the number of bytes of stack to pop.
      RET_FLAG,

      /// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx.
      REP_STOS,

      /// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.
      REP_MOVS,

      /// GlobalBaseReg - On Darwin, this node represents the result of the popl
      /// at function entry, used for PIC code.
      GlobalBaseReg,

      /// Wrapper - A wrapper node for TargetConstantPool,
      /// TargetExternalSymbol, and TargetGlobalAddress.
      Wrapper,

      /// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP
      /// relative displacements.
      WrapperRIP,

      /// MOVQ2DQ - Copies a 64-bit value from an MMX vector to the low word
      /// of an XMM vector, with the high word zero filled.
      MOVQ2DQ,

      /// MOVDQ2Q - Copies a 64-bit value from the low word of an XMM vector
      /// to an MMX vector. If you think this is too close to the previous
      /// mnemonic, so do I; blame Intel.
      MOVDQ2Q,

      /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRB.
      PEXTRB,

      /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRW.
      PEXTRW,

      /// INSERTPS - Insert any element of a 4 x float vector into any element
      /// of a destination 4 x float vector.
      INSERTPS,

      /// PINSRB - Insert the lower 8 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRB.
      PINSRB,

      /// PINSRW - Insert the lower 16 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRW.
      PINSRW, MMX_PINSRW,

      /// PSHUFB - Shuffle 16 8-bit values within a vector.
      PSHUFB,

      /// PANDN - and with not'd value.
      PANDN,

      /// PSIGNB/W/D - Copy integer sign.
      PSIGNB, PSIGNW, PSIGND,

      /// PBLENDVB - Variable blend.
      PBLENDVB,

      /// FMAX, FMIN - Floating point max and min.
      ///
      FMAX, FMIN,

      /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
      /// approximation. Note that these typically require refinement
      /// in order to obtain suitable precision.
      FRSQRT, FRCP,

      // TLSADDR - Thread Local Storage.
      TLSADDR,

      // TLSCALL - Thread Local Storage. A call to an OS-provided thunk at
      // the address from an earlier relocation.
      TLSCALL,

      // EH_RETURN - Exception Handling helpers.
      EH_RETURN,

      /// TC_RETURN - Tail call return.
      ///   operand #0 chain
      ///   operand #1 callee (register or absolute)
      ///   operand #2 stack adjustment
      ///   operand #3 optional in flag
      TC_RETURN,

      // VZEXT_MOVL - Vector move low and zero extend.
      VZEXT_MOVL,

      // VSHL, VSRL - Vector logical left / right shift.
      VSHL, VSRL,

      // CMPPD, CMPPS - Vector double/float comparison.
      CMPPD, CMPPS,

      // PCMP* - Vector integer comparisons.
      PCMPEQB, PCMPEQW, PCMPEQD, PCMPEQQ,
      PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ,

      // ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
      ADD, SUB, ADC, SBB, SMUL,
      INC, DEC, OR, XOR, AND,

      UMUL, // LOW, HI, FLAGS = umul LHS, RHS

      // MUL_IMM - X86 specific multiply by immediate.
      MUL_IMM,

      // PTEST - Vector bitwise comparisons.
      PTEST,

      // TESTP - Vector packed fp sign bitwise comparisons.
      TESTP,

      // Several flavors of instructions with vector shuffle behaviors.
      PALIGN,
      PSHUFD,
      PSHUFHW,
      PSHUFLW,
      PSHUFHW_LD,
      PSHUFLW_LD,
      SHUFPD,
      SHUFPS,
      MOVDDUP,
      MOVSHDUP,
      MOVSLDUP,
      MOVSHDUP_LD,
      MOVSLDUP_LD,
      MOVLHPS,
      MOVLHPD,
      MOVHLPS,
      MOVHLPD,
      MOVLPS,
      MOVLPD,
      MOVSD,
      MOVSS,
      UNPCKLPS,
      UNPCKLPD,
      UNPCKHPS,
      UNPCKHPD,
      PUNPCKLBW,
      PUNPCKLWD,
      PUNPCKLDQ,
      PUNPCKLQDQ,
      PUNPCKHBW,
      PUNPCKHWD,
      PUNPCKHDQ,
      PUNPCKHQDQ,

      // VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
      // according to %al. An operator is needed so that this can be expanded
      // with control flow.
      VASTART_SAVE_XMM_REGS,

      // WIN_ALLOCA - Windows's _chkstk call to do stack probing.
      WIN_ALLOCA,

      // Memory barriers.
      MEMBARRIER,
      MFENCE,
      SFENCE,
      LFENCE,

      // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
      // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
      // Atomic 64-bit binary operations.
      ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
      ATOMSUB64_DAG,
      ATOMOR64_DAG,
      ATOMXOR64_DAG,
      ATOMAND64_DAG,
      ATOMNAND64_DAG,
      ATOMSWAP64_DAG,

      // LCMPXCHG_DAG, LCMPXCHG8_DAG - Compare and swap.
      LCMPXCHG_DAG,
      LCMPXCHG8_DAG,

      // VZEXT_LOAD - Load, scalar_to_vector, and zero extend.
      VZEXT_LOAD,

      // FNSTCW16m - Store FP control word into i16 memory.
      FNSTCW16m,

      /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
      /// integer destination in memory and a FP reg source. This corresponds
      /// to the X86::FIST*m instructions and the rounding mode change stuff. It
      /// has two inputs (token chain and address) and two outputs (int value
      /// and token chain).
      FP_TO_INT16_IN_MEM,
      FP_TO_INT32_IN_MEM,
      FP_TO_INT64_IN_MEM,

      /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
      /// integer source in memory and FP reg result. This corresponds to the
      /// X86::FILD*m instructions. It has three inputs (token chain, address,
      /// and source type) and two outputs (FP value and token chain). FILD_FLAG
      /// also produces a flag.
      FILD,
      FILD_FLAG,

      /// FLD - This instruction implements an extending load to FP stack slots.
      /// This corresponds to the X86::FLD32m / X86::FLD64m instructions. It
      /// takes a chain operand, a ptr to load from, and a ValueType node
      /// indicating the type to load to.
      FLD,

      /// FST - This instruction implements a truncating store to FP stack
      /// slots. This corresponds to the X86::FST32m / X86::FST64m instructions.
      /// It takes a chain operand, a value to store, an address, and a
      /// ValueType to store it as.
      FST,

      /// VAARG_64 - This instruction grabs the address of the next argument
      /// from a va_list. (reads and modifies the va_list in memory)
      VAARG_64

      // WARNING: Do not add anything at the end unless you want the node to
      // have a memop! In fact, starting from ATOMADD64_DAG all opcodes will be
      // treated as target memory ops!
    };
  }
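
  // Illustrative sketch, not part of this interface: during custom lowering
  // these target opcodes are created with SelectionDAG::getNode just like the
  // generic ISD opcodes. For example, a hypothetical lowering of an i32
  // equality test might build a CMP feeding a SETCC (operand order per the
  // SETCC documentation above: condition code first, then the EFLAGS value):
  //
  //   SDValue Cmp   = DAG.getNode(X86ISD::CMP, dl, MVT::i32, LHS, RHS);
  //   SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
  //                               DAG.getConstant(X86::COND_E, MVT::i8), Cmp);
  //
  // The exact helpers and condition-code handling live in X86ISelLowering.cpp;
  // the snippet above is only an assumption-level sketch.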

  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFD.
    bool isPSHUFDMask(ShuffleVectorSDNode *N);

    /// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFHW.
    bool isPSHUFHWMask(ShuffleVectorSDNode *N);

    /// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFLW.
    bool isPSHUFLWMask(ShuffleVectorSDNode *N);

    /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to SHUFP*.
    bool isSHUFPMask(ShuffleVectorSDNode *N);

    /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
    bool isMOVHLPSMask(ShuffleVectorSDNode *N);

    /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
    /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
    /// <2, 3, 2, 3>
    bool isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N);

    /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for MOVLP{S|D}.
    bool isMOVLPMask(ShuffleVectorSDNode *N);

    /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for MOVHP{S|D},
    /// as well as MOVLHPS.
    bool isMOVLHPSMask(ShuffleVectorSDNode *N);

    /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to UNPCKL.
    bool isUNPCKLMask(ShuffleVectorSDNode *N, bool V2IsSplat = false);

    /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to UNPCKH.
    bool isUNPCKHMask(ShuffleVectorSDNode *N, bool V2IsSplat = false);

    /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
    /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
    /// <0, 0, 1, 1>
    bool isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N);

    /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
    /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
    /// <2, 2, 3, 3>
    bool isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N);

    /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSS,
    /// MOVSD, and MOVD, i.e. setting the lowest element.
    bool isMOVLMask(ShuffleVectorSDNode *N);

    /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
    bool isMOVSHDUPMask(ShuffleVectorSDNode *N);

    /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
    bool isMOVSLDUPMask(ShuffleVectorSDNode *N);

    /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVDDUP.
    bool isMOVDDUPMask(ShuffleVectorSDNode *N);

    /// isPALIGNRMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PALIGNR.
    bool isPALIGNRMask(ShuffleVectorSDNode *N);

    /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with the PSHUF* and SHUFP*
    /// instructions.
    unsigned getShuffleSHUFImmediate(SDNode *N);

    /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
    unsigned getShufflePSHUFHWImmediate(SDNode *N);

    /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
    unsigned getShufflePSHUFLWImmediate(SDNode *N);

    /// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction.
    unsigned getShufflePALIGNRImmediate(SDNode *N);

    /// isZeroNode - Returns true if Elt is a constant zero or a floating point
    /// constant +0.0.
    bool isZeroNode(SDValue Elt);

    /// isOffsetSuitableForCodeModel - Returns true if the given offset can
    /// fit into the displacement field of the instruction.
    bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                      bool hasSymbolicDisplacement = true);
  }
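
  // Illustrative sketch (assumed usage, not declared here): the predicates
  // above are typically paired with the getShuffle*Immediate helpers when a
  // VECTOR_SHUFFLE is matched to a concrete x86 shuffle, e.g.:
  //
  //   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  //   if (X86::isPSHUFDMask(SVOp)) {
  //     unsigned Imm = X86::getShuffleSHUFImmediate(SVOp);
  //     // ... emit a PSHUFD using the immediate Imm ...
  //   }
  //
  // The real call sites are in X86ISelLowering.cpp and the TableGen patterns;
  // this only shows how the predicate/immediate pairs are meant to be used.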

  //===--------------------------------------------------------------------===//
  //  X86TargetLowering - X86 Implementation of the TargetLowering interface
  class X86TargetLowering : public TargetLowering {
  public:
    explicit X86TargetLowering(X86TargetMachine &TM);

    virtual unsigned getJumpTableEncoding() const;

    virtual const MCExpr *
    LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                              const MachineBasicBlock *MBB, unsigned uid,
                              MCContext &Ctx) const;

    /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
    /// jumptable.
    virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                             SelectionDAG &DAG) const;
    virtual const MCExpr *
    getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                 unsigned JTI, MCContext &Ctx) const;

    /// getStackPtrReg - Return the stack pointer register we are using: either
    /// ESP or RSP.
    unsigned getStackPtrReg() const { return X86StackPtr; }

    /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area. For X86, aggregates
    /// that contain SSE vectors are placed at 16-byte boundaries while the
    /// rest are at 4-byte boundaries.
    virtual unsigned getByValTypeAlignment(const Type *Ty) const;

    /// getOptimalMemOpType - Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, the destination alignment can satisfy
    /// any constraint. Similarly, if SrcAlign is zero there is no need to
    /// check it against the alignment requirement, probably because the source
    /// does not need to be loaded. If 'NonScalarIntSafe' is true, it is safe
    /// to return a non-scalar-integer type, e.g. for an empty string source, a
    /// constant, or a value loaded from memory. 'MemcpyStrSrc' indicates
    /// whether the memcpy source is constant so it does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    virtual EVT
    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                        bool NonScalarIntSafe, bool MemcpyStrSrc,
                        MachineFunction &MF) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
      return true;
    }

    /// LowerOperation - Provide custom lowering hooks for some operations.
    ///
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG) const;

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    /// isTypeDesirableForOp - Return true if the target has native support for
    /// the specified value type and it is 'desirable' to use the type for the
    /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
    /// instruction encodings are longer and some i16 instructions are slow.
    virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const;

    /// IsDesirableToPromoteOp - Return true if it is desirable to promote the
    /// given operation to a larger type, setting PVT to the desired promotion
    /// type. e.g. On x86 i16 is legal, but undesirable since i16 instruction
    /// encodings are longer and some i16 instructions are slow.
    virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const;

    virtual MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr *MI,
                                MachineBasicBlock *MBB) const;

    /// getTargetNodeName - This method returns the name of a target specific
    /// DAG node.
    virtual const char *getTargetNodeName(unsigned Opcode) const;

    /// getSetCCResultType - Return the ISD::SETCC ValueType.
    virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;

    /// computeMaskedBitsForTargetNode - Determine which of the bits specified
    /// in Mask are known to be either zero or one and return them in the
    /// KnownZero/KnownOne bitsets.
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth = 0) const;

    // ComputeNumSignBitsForTargetNode - Determine the number of bits in the
    // operation that are sign bits.
    virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                     unsigned Depth) const;

    virtual bool
    isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;

    virtual bool ExpandInlineAsm(CallInst *CI) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    std::vector<unsigned>
    getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                      EVT VT) const;

    virtual const char *LowerXConstraint(EVT ConstraintVT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              char ConstraintLetter,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    /// getRegForInlineAsmConstraint - Given a physical register constraint
    /// (e.g. {edx}), return the register number and the register class for the
    /// register. This should only be used for C_Register constraints. On
    /// error, this returns a register number of 0.
    std::pair<unsigned, const TargetRegisterClass*>
    getRegForInlineAsmConstraint(const std::string &Constraint,
                                 EVT VT) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM,
                                       const Type *Ty) const;

    /// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
    /// register EAX to i16 by referencing its sub-register AX.
    virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const;
    virtual bool isTruncateFree(EVT VT1, EVT VT2) const;

    /// isZExtFree - Return true if any actual instruction that defines a
    /// value of type Ty1 implicitly zero-extends the value to Ty2 in the result
    /// register. This does not necessarily include registers defined in
    /// unknown ways, such as incoming arguments, or copies from unknown
    /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
    /// does not necessarily apply to truncate instructions. e.g. on x86-64,
    /// all instructions that define 32-bit values implicitly zero-extend the
    /// result out to 64 bits.
    virtual bool isZExtFree(const Type *Ty1, const Type *Ty2) const;
    virtual bool isZExtFree(EVT VT1, EVT VT2) const;

    /// isNarrowingProfitable - Return true if it's profitable to narrow
    /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
    /// from i32 to i8 but not from i32 to i16.
    virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

    /// isShuffleMaskLegal - Targets can use this to indicate that they only
    /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
    /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
    /// values are assumed to be legal.
    virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
                                    EVT VT) const;

    /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can use
    /// this to indicate if there is a suitable VECTOR_SHUFFLE that can be used
    /// to replace a VAND with a constant pool entry.
    virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                        EVT VT) const;

    /// ShouldShrinkFPConstant - If true, then instruction selection should
    /// seek to shrink the FP constant of the specified type to a smaller type
    /// in order to save space and / or reduce runtime.
    virtual bool ShouldShrinkFPConstant(EVT VT) const {
      // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
      // expensive than a straight movsd. On the other hand, it's important to
      // shrink long double fp constant since fldt is very slow.
      return !X86ScalarSSEf64 || VT == MVT::f80;
    }

    const X86Subtarget* getSubtarget() const {
      return Subtarget;
    }

    /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
    /// computed in an SSE register, not on the X87 floating point stack.
    bool isScalarFPTypeInSSEReg(EVT VT) const {
      return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
             (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is when SSE1
    }
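
    // Assumed usage sketch: lowering code typically uses this predicate to
    // pick between the SSE and x87 code paths, roughly:
    //
    //   if (isScalarFPTypeInSSEReg(Op.getValueType()))
    //     ...  // emit SSE conversions such as cvttss2si / cvttsd2si
    //   else
    //     ...  // fall back to the FP_TO_INT*_IN_MEM x87 sequence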

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;

    /// getFunctionAlignment - Return the Log2 alignment of this function.
    virtual unsigned getFunctionAlignment(const Function *F) const;

    unsigned getRegPressureLimit(const TargetRegisterClass *RC,
                                 MachineFunction &MF) const;

    /// getStackCookieLocation - Return true if the target stores stack
    /// protector cookies at a fixed offset in some non-standard address
    /// space, and populates the address space and offset as
    /// appropriate.
    virtual bool getStackCookieLocation(unsigned &AddressSpace,
                                        unsigned &Offset) const;

  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
    findRepresentativeClass(EVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;
    const X86RegisterInfo *RegInfo;
    const TargetData *TD;

    /// X86StackPtr - X86 physical register used as stack ptr.
    unsigned X86StackPtr;

    /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
    /// floating point ops.
    /// When SSE is available, use it for f32 operations.
    /// When SSE2 is available, use it for f64 operations.
    bool X86ScalarSSEf32;
    bool X86ScalarSSEf64;

    /// LegalFPImmediates - A list of legal fp immediates.
    std::vector<APFloat> LegalFPImmediates;

    /// addLegalFPImmediate - Indicate that this x86 target can instruction
    /// select the specified FP immediate natively.
    void addLegalFPImmediate(const APFloat& Imm) {
      LegalFPImmediates.push_back(Imm);
    }
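
    // Assumed usage sketch: the X86TargetLowering constructor registers the
    // immediates the hardware can materialize directly, for example:
    //
    //   addLegalFPImmediate(APFloat(+0.0));   // xorps / fldz
    //
    // and isFPImmLegal then answers queries by scanning LegalFPImmediates.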

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerMemArgument(SDValue Chain,
                             CallingConv::ID CallConv,
                             const SmallVectorImpl<ISD::InputArg> &ArgInfo,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA, MachineFrameInfo *MFI,
                             unsigned i) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;

    // Call lowering helpers.

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;
    bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
    SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                    SDValue Chain, bool IsTailCall, bool Is64Bit,
                                    int FPDiff, DebugLoc dl) const;

    unsigned GetAlignedArgumentStackSize(unsigned StackSize,
                                         SelectionDAG &DAG) const;

    std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                               bool isSigned) const;

    SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
                                   SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
                               int64_t Offset, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
    SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
                      SelectionDAG &DAG) const;
    SDValue LowerBITCAST(SDValue op, SelectionDAG &DAG) const;
    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToBT(SDValue And, ISD::CondCode CC,
                      DebugLoc dl, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSHL(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const;

    // Utility functions to help LowerVECTOR_SHUFFLE.
    SDValue LowerVECTOR_SHUFFLEv8i16(SDValue Op, SelectionDAG &DAG) const;

    virtual SDValue
    LowerFormalArguments(SDValue Chain,
                         CallingConv::ID CallConv, bool isVarArg,
                         const SmallVectorImpl<ISD::InputArg> &Ins,
                         DebugLoc dl, SelectionDAG &DAG,
                         SmallVectorImpl<SDValue> &InVals) const;
    virtual SDValue
    LowerCall(SDValue Chain, SDValue Callee,
              CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
              const SmallVectorImpl<ISD::OutputArg> &Outs,
              const SmallVectorImpl<SDValue> &OutVals,
              const SmallVectorImpl<ISD::InputArg> &Ins,
              DebugLoc dl, SelectionDAG &DAG,
              SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
    LowerReturn(SDValue Chain,
                CallingConv::ID CallConv, bool isVarArg,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                DebugLoc dl, SelectionDAG &DAG) const;

    virtual bool isUsedByReturnOnly(SDNode *N) const;

    virtual bool
    CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                   LLVMContext &Context) const;

    void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                 SelectionDAG &DAG, unsigned NewOp) const;

    /// Utility function to emit string processing sse4.2 instructions
    /// that return in xmm0.
    /// This takes the instruction to expand, the associated machine basic
    /// block, the number of args, and whether or not the second arg is
    /// in memory.
    MachineBasicBlock *EmitPCMP(MachineInstr *BInstr, MachineBasicBlock *BB,
                                unsigned argNum, bool inMem) const;

    /// Utility functions to emit monitor and mwait instructions. These
    /// need to make sure that the arguments to the intrinsic are in the
    /// correct registers.
    MachineBasicBlock *EmitMonitor(MachineInstr *MI,
                                   MachineBasicBlock *BB) const;
    MachineBasicBlock *EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const;

    /// Utility function to emit atomic bitwise operations (and, or, xor).
    /// It takes the bitwise instruction to expand, the associated machine basic
    /// block, and the associated X86 opcodes for reg/reg and reg/imm.
    MachineBasicBlock *EmitAtomicBitwiseWithCustomInserter(
                                                    MachineInstr *BInstr,
                                                    MachineBasicBlock *BB,
                                                    unsigned regOpc,
                                                    unsigned immOpc,
                                                    unsigned loadOpc,
                                                    unsigned cxchgOpc,
                                                    unsigned notOpc,
                                                    unsigned EAXreg,
                                                    TargetRegisterClass *RC,
                                                    bool invSrc = false) const;

    MachineBasicBlock *EmitAtomicBit6432WithCustomInserter(
                                                    MachineInstr *BInstr,
                                                    MachineBasicBlock *BB,
                                                    unsigned regOpcL,
                                                    unsigned regOpcH,
                                                    unsigned immOpcL,
                                                    unsigned immOpcH,
                                                    bool invSrc = false) const;

    /// Utility function to emit atomic min and max. It takes the min/max
    /// instruction to expand, the associated basic block, and the associated
    /// cmov opcode for moving the min or max value.
    MachineBasicBlock *EmitAtomicMinMaxWithCustomInserter(MachineInstr *BInstr,
                                                          MachineBasicBlock *BB,
                                                        unsigned cmovOpc) const;

    // Utility function to emit the low-level va_arg code for X86-64.
    MachineBasicBlock *EmitVAARG64WithCustomInserter(
                       MachineInstr *MI,
                       MachineBasicBlock *MBB) const;

    /// Utility function to emit the xmm reg save portion of va_start.
    MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
                                                   MachineInstr *BInstr,
                                                   MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
                                         MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *emitLoweredTLSAddr(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    /// Emit nodes that will be selected as "test Op0,Op0", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;

    /// Emit nodes that will be selected as "cmp Op0,Op1", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                    SelectionDAG &DAG) const;
  };

  namespace X86 {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
  }
}

#endif // X86ISELLOWERING_H