Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 1 | //===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===// |
| 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
Chris Lattner | 4ee451d | 2007-12-29 20:36:04 +0000 | [diff] [blame] | 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | // |
| 10 | // This file defines the interfaces that X86 uses to lower LLVM code into a |
| 11 | // selection DAG. |
| 12 | // |
| 13 | //===----------------------------------------------------------------------===// |
| 14 | |
| 15 | #ifndef X86ISELLOWERING_H |
| 16 | #define X86ISELLOWERING_H |
| 17 | |
Evan Cheng | 559806f | 2006-01-27 08:10:46 +0000 | [diff] [blame] | 18 | #include "X86Subtarget.h" |
Anton Korobeynikov | 2365f51 | 2007-07-14 14:06:15 +0000 | [diff] [blame] | 19 | #include "X86RegisterInfo.h" |
Gordon Henriksen | 8673766 | 2008-01-05 16:56:59 +0000 | [diff] [blame] | 20 | #include "X86MachineFunctionInfo.h" |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 21 | #include "llvm/Target/TargetLowering.h" |
Evan Cheng | ddc419c | 2010-01-26 19:04:47 +0000 | [diff] [blame] | 22 | #include "llvm/Target/TargetOptions.h" |
Ted Kremenek | b388eb8 | 2008-09-03 02:54:11 +0000 | [diff] [blame] | 23 | #include "llvm/CodeGen/FastISel.h" |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 24 | #include "llvm/CodeGen/SelectionDAG.h" |
Rafael Espindola | 1b5dcc3 | 2007-08-31 15:06:30 +0000 | [diff] [blame] | 25 | #include "llvm/CodeGen/CallingConvLower.h" |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 26 | |
| 27 | namespace llvm { |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 28 | namespace X86ISD { |
Evan Cheng | d9558e0 | 2006-01-06 00:43:03 +0000 | [diff] [blame] | 29 | // X86 Specific DAG Nodes |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 30 | enum NodeType { |
| 31 | // Start the numbering where the builtin ops leave off. |
Dan Gohman | 0ba2bcf | 2008-09-23 18:42:32 +0000 | [diff] [blame] | 32 | FIRST_NUMBER = ISD::BUILTIN_OP_END, |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 33 | |
Evan Cheng | 18efe26 | 2007-12-14 02:13:44 +0000 | [diff] [blame] | 34 | /// BSF - Bit scan forward. |
| 35 | /// BSR - Bit scan reverse. |
| 36 | BSF, |
| 37 | BSR, |
| 38 | |
Evan Cheng | e341316 | 2006-01-09 18:33:28 +0000 | [diff] [blame] | 39 | /// SHLD, SHRD - Double shift instructions. These correspond to |
| 40 | /// X86::SHLDxx and X86::SHRDxx instructions. |
| 41 | SHLD, |
| 42 | SHRD, |
| 43 | |
Evan Cheng | ef6ffb1 | 2006-01-31 03:14:29 +0000 | [diff] [blame] | 44 | /// FAND - Bitwise logical AND of floating point values. This corresponds |
| 45 | /// to X86::ANDPS or X86::ANDPD. |
| 46 | FAND, |
| 47 | |
Evan Cheng | 68c47cb | 2007-01-05 07:55:56 +0000 | [diff] [blame] | 48 | /// FOR - Bitwise logical OR of floating point values. This corresponds |
| 49 | /// to X86::ORPS or X86::ORPD. |
| 50 | FOR, |
| 51 | |
Evan Cheng | 223547a | 2006-01-31 22:28:30 +0000 | [diff] [blame] | 52 | /// FXOR - Bitwise logical XOR of floating point values. This corresponds |
| 53 | /// to X86::XORPS or X86::XORPD. |
| 54 | FXOR, |
| 55 | |
Evan Cheng | 73d6cf1 | 2007-01-05 21:37:56 +0000 | [diff] [blame] | 56 | /// FSRL - Bitwise logical right shift of floating point values. These |
| 57 | /// corresponds to X86::PSRLDQ. |
Evan Cheng | 68c47cb | 2007-01-05 07:55:56 +0000 | [diff] [blame] | 58 | FSRL, |
| 59 | |
Dan Gohman | 98ca4f2 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 60 | /// CALL - These operations represent an abstract X86 call |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 61 | /// instruction, which includes a bunch of information. In particular the |
| 62 | /// operands of these node are: |
| 63 | /// |
| 64 | /// #0 - The incoming token chain |
| 65 | /// #1 - The callee |
| 66 | /// #2 - The number of arg bytes the caller pushes on the stack. |
| 67 | /// #3 - The number of arg bytes the callee pops off the stack. |
| 68 | /// #4 - The value to pass in AL/AX/EAX (optional) |
| 69 | /// #5 - The value to pass in DL/DX/EDX (optional) |
| 70 | /// |
| 71 | /// The result values of these nodes are: |
| 72 | /// |
| 73 | /// #0 - The outgoing token chain |
| 74 | /// #1 - The first register result value (optional) |
| 75 | /// #2 - The second register result value (optional) |
| 76 | /// |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 77 | CALL, |
Dan Gohman | 98ca4f2 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 78 | |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 79 | /// RDTSC_DAG - This operation implements the lowering for |
Andrew Lenharth | b873ff3 | 2005-11-20 21:41:10 +0000 | [diff] [blame] | 80 | /// readcyclecounter |
| 81 | RDTSC_DAG, |
Evan Cheng | 7df96d6 | 2005-12-17 01:21:05 +0000 | [diff] [blame] | 82 | |
| 83 | /// X86 compare and logical compare instructions. |
Evan Cheng | 7d6ff3a | 2007-09-17 17:42:53 +0000 | [diff] [blame] | 84 | CMP, COMI, UCOMI, |
Evan Cheng | 7df96d6 | 2005-12-17 01:21:05 +0000 | [diff] [blame] | 85 | |
Dan Gohman | c7a37d4 | 2008-12-23 22:45:23 +0000 | [diff] [blame] | 86 | /// X86 bit-test instructions. |
| 87 | BT, |
| 88 | |
Dan Gohman | 2004eb6 | 2009-03-23 15:40:10 +0000 | [diff] [blame] | 89 | /// X86 SetCC. Operand 0 is condition code, and operand 1 is the flag |
Evan Cheng | d5781fc | 2005-12-21 20:21:51 +0000 | [diff] [blame] | 90 | /// operand produced by a CMP instruction. |
| 91 | SETCC, |
| 92 | |
Evan Cheng | ad9c0a3 | 2009-12-15 00:53:42 +0000 | [diff] [blame] | 93 | // Same as SETCC except it's materialized with a sbb and the value is all |
| 94 | // one's or all zero's. |
Chris Lattner | c19d1c3 | 2010-12-19 22:08:31 +0000 | [diff] [blame] | 95 | SETCC_CARRY, // R = carry_bit ? ~0 : 0 |
Evan Cheng | ad9c0a3 | 2009-12-15 00:53:42 +0000 | [diff] [blame] | 96 | |
Chris Lattner | 2b9f434 | 2009-03-12 06:46:02 +0000 | [diff] [blame] | 97 | /// X86 conditional moves. Operand 0 and operand 1 are the two values |
| 98 | /// to select from. Operand 2 is the condition code, and operand 3 is the |
| 99 | /// flag operand produced by a CMP or TEST instruction. It also writes a |
| 100 | /// flag result. |
Evan Cheng | 7df96d6 | 2005-12-17 01:21:05 +0000 | [diff] [blame] | 101 | CMOV, |
Evan Cheng | 898101c | 2005-12-19 23:12:38 +0000 | [diff] [blame] | 102 | |
Dan Gohman | 2004eb6 | 2009-03-23 15:40:10 +0000 | [diff] [blame] | 103 | /// X86 conditional branches. Operand 0 is the chain operand, operand 1 |
| 104 | /// is the block to branch if condition is true, operand 2 is the |
| 105 | /// condition code, and operand 3 is the flag operand produced by a CMP |
Evan Cheng | d5781fc | 2005-12-21 20:21:51 +0000 | [diff] [blame] | 106 | /// or TEST instruction. |
Evan Cheng | 898101c | 2005-12-19 23:12:38 +0000 | [diff] [blame] | 107 | BRCOND, |
Evan Cheng | b077b84 | 2005-12-21 02:39:21 +0000 | [diff] [blame] | 108 | |
Dan Gohman | 2004eb6 | 2009-03-23 15:40:10 +0000 | [diff] [blame] | 109 | /// Return with a flag operand. Operand 0 is the chain operand, operand |
| 110 | /// 1 is the number of bytes of stack to pop. |
Evan Cheng | b077b84 | 2005-12-21 02:39:21 +0000 | [diff] [blame] | 111 | RET_FLAG, |
Evan Cheng | 67f92a7 | 2006-01-11 22:15:48 +0000 | [diff] [blame] | 112 | |
| 113 | /// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx. |
| 114 | REP_STOS, |
| 115 | |
| 116 | /// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx. |
| 117 | REP_MOVS, |
Evan Cheng | 223547a | 2006-01-31 22:28:30 +0000 | [diff] [blame] | 118 | |
Evan Cheng | 7ccced6 | 2006-02-18 00:15:05 +0000 | [diff] [blame] | 119 | /// GlobalBaseReg - On Darwin, this node represents the result of the popl |
| 120 | /// at function entry, used for PIC code. |
| 121 | GlobalBaseReg, |
Evan Cheng | a0ea053 | 2006-02-23 02:43:52 +0000 | [diff] [blame] | 122 | |
Bill Wendling | 056292f | 2008-09-16 21:48:12 +0000 | [diff] [blame] | 123 | /// Wrapper - A wrapper node for TargetConstantPool, |
| 124 | /// TargetExternalSymbol, and TargetGlobalAddress. |
Evan Cheng | 020d2e8 | 2006-02-23 20:41:18 +0000 | [diff] [blame] | 125 | Wrapper, |
Evan Cheng | 48090aa | 2006-03-21 23:01:21 +0000 | [diff] [blame] | 126 | |
Evan Cheng | 0085a28 | 2006-11-30 21:55:46 +0000 | [diff] [blame] | 127 | /// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP |
| 128 | /// relative displacements. |
| 129 | WrapperRIP, |
| 130 | |
Dale Johannesen | 0488fb6 | 2010-09-30 23:57:10 +0000 | [diff] [blame] | 131 | /// MOVQ2DQ - Copies a 64-bit value from an MMX vector to the low word |
| 132 | /// of an XMM vector, with the high word zero filled. |
Mon P Wang | eb38ebf | 2010-01-24 00:05:03 +0000 | [diff] [blame] | 133 | MOVQ2DQ, |
| 134 | |
Dale Johannesen | 0488fb6 | 2010-09-30 23:57:10 +0000 | [diff] [blame] | 135 | /// MOVDQ2Q - Copies a 64-bit value from the low word of an XMM vector |
| 136 | /// to an MMX vector. If you think this is too close to the previous |
| 137 | /// mnemonic, so do I; blame Intel. |
| 138 | MOVDQ2Q, |
| 139 | |
Nate Begeman | 14d12ca | 2008-02-11 04:19:36 +0000 | [diff] [blame] | 140 | /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to |
| 141 | /// i32, corresponds to X86::PEXTRB. |
| 142 | PEXTRB, |
| 143 | |
Evan Cheng | b067a1e | 2006-03-31 19:22:53 +0000 | [diff] [blame] | 144 | /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to |
Evan Cheng | 653159f | 2006-03-31 21:55:24 +0000 | [diff] [blame] | 145 | /// i32, corresponds to X86::PEXTRW. |
Evan Cheng | b067a1e | 2006-03-31 19:22:53 +0000 | [diff] [blame] | 146 | PEXTRW, |
Evan Cheng | 653159f | 2006-03-31 21:55:24 +0000 | [diff] [blame] | 147 | |
Nate Begeman | 14d12ca | 2008-02-11 04:19:36 +0000 | [diff] [blame] | 148 | /// INSERTPS - Insert any element of a 4 x float vector into any element |
| 149 | /// of a destination 4 x floatvector. |
| 150 | INSERTPS, |
| 151 | |
| 152 | /// PINSRB - Insert the lower 8-bits of a 32-bit value to a vector, |
| 153 | /// corresponds to X86::PINSRB. |
| 154 | PINSRB, |
| 155 | |
Evan Cheng | 653159f | 2006-03-31 21:55:24 +0000 | [diff] [blame] | 156 | /// PINSRW - Insert the lower 16-bits of a 32-bit value to a vector, |
| 157 | /// corresponds to X86::PINSRW. |
Chris Lattner | 8f2b4cc | 2010-02-23 02:07:48 +0000 | [diff] [blame] | 158 | PINSRW, MMX_PINSRW, |
Evan Cheng | 8ca2932 | 2006-11-10 21:43:37 +0000 | [diff] [blame] | 159 | |
Nate Begeman | b9a47b8 | 2009-02-23 08:49:38 +0000 | [diff] [blame] | 160 | /// PSHUFB - Shuffle 16 8-bit values within a vector. |
| 161 | PSHUFB, |
Nate Begeman | b65c175 | 2010-12-17 22:55:37 +0000 | [diff] [blame] | 162 | |
| 163 | /// PANDN - and with not'd value. |
| 164 | PANDN, |
| 165 | |
| 166 | /// PSIGNB/W/D - Copy integer sign. |
| 167 | PSIGNB, PSIGNW, PSIGND, |
| 168 | |
Evan Cheng | 8ca2932 | 2006-11-10 21:43:37 +0000 | [diff] [blame] | 169 | /// FMAX, FMIN - Floating point max and min. |
| 170 | /// |
Lauro Ramos Venancio | b3a0417 | 2007-04-20 21:38:10 +0000 | [diff] [blame] | 171 | FMAX, FMIN, |
Dan Gohman | 2038252 | 2007-07-10 00:05:58 +0000 | [diff] [blame] | 172 | |
| 173 | /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal |
| 174 | /// approximation. Note that these typically require refinement |
| 175 | /// in order to obtain suitable precision. |
| 176 | FRSQRT, FRCP, |
| 177 | |
Rafael Espindola | 094fad3 | 2009-04-08 21:14:34 +0000 | [diff] [blame] | 178 | // TLSADDR - Thread Local Storage. |
| 179 | TLSADDR, |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 180 | |
Eric Christopher | 30ef0e5 | 2010-06-03 04:07:48 +0000 | [diff] [blame] | 181 | // TLSCALL - Thread Local Storage. When calling to an OS provided |
| 182 | // thunk at the address from an earlier relocation. |
| 183 | TLSCALL, |
Rafael Espindola | 094fad3 | 2009-04-08 21:14:34 +0000 | [diff] [blame] | 184 | |
Evan Cheng | 7e2ff77 | 2008-05-08 00:57:18 +0000 | [diff] [blame] | 185 | // EH_RETURN - Exception Handling helpers. |
Arnold Schwaighofer | c85e171 | 2007-10-11 19:40:01 +0000 | [diff] [blame] | 186 | EH_RETURN, |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 187 | |
Arnold Schwaighofer | 4fe3073 | 2008-03-19 16:39:45 +0000 | [diff] [blame] | 188 | /// TC_RETURN - Tail call return. |
| 189 | /// operand #0 chain |
| 190 | /// operand #1 callee (register or absolute) |
| 191 | /// operand #2 stack adjustment |
| 192 | /// operand #3 optional in flag |
Anton Korobeynikov | 45b22fa | 2007-11-16 01:31:51 +0000 | [diff] [blame] | 193 | TC_RETURN, |
| 194 | |
Evan Cheng | d880b97 | 2008-05-09 21:53:03 +0000 | [diff] [blame] | 195 | // VZEXT_MOVL - Vector move low and zero extend. |
| 196 | VZEXT_MOVL, |
| 197 | |
Evan Cheng | f26ffe9 | 2008-05-29 08:22:04 +0000 | [diff] [blame] | 198 | // VSHL, VSRL - Vector logical left / right shift. |
Nate Begeman | 30a0de9 | 2008-07-17 16:51:19 +0000 | [diff] [blame] | 199 | VSHL, VSRL, |
Nate Begeman | 9008ca6 | 2009-04-27 18:41:29 +0000 | [diff] [blame] | 200 | |
| 201 | // CMPPD, CMPPS - Vector double/float comparison. |
Nate Begeman | 30a0de9 | 2008-07-17 16:51:19 +0000 | [diff] [blame] | 202 | // CMPPD, CMPPS - Vector double/float comparison. |
| 203 | CMPPD, CMPPS, |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 204 | |
Nate Begeman | 30a0de9 | 2008-07-17 16:51:19 +0000 | [diff] [blame] | 205 | // PCMP* - Vector integer comparisons. |
| 206 | PCMPEQB, PCMPEQW, PCMPEQD, PCMPEQQ, |
Bill Wendling | ab55ebd | 2008-12-12 00:56:36 +0000 | [diff] [blame] | 207 | PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ, |
| 208 | |
Chris Lattner | b20e0b1 | 2010-12-05 07:30:36 +0000 | [diff] [blame] | 209 | // ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results. |
| 210 | ADD, SUB, SMUL, |
Dan Gohman | e220c4b | 2009-09-18 19:59:53 +0000 | [diff] [blame] | 211 | INC, DEC, OR, XOR, AND, |
Chris Lattner | b20e0b1 | 2010-12-05 07:30:36 +0000 | [diff] [blame] | 212 | |
| 213 | UMUL, // LOW, HI, FLAGS = umul LHS, RHS |
Evan Cheng | 73f24c9 | 2009-03-30 21:36:47 +0000 | [diff] [blame] | 214 | |
| 215 | // MUL_IMM - X86 specific multiply by immediate. |
Eric Christopher | 71c6753 | 2009-07-29 00:28:05 +0000 | [diff] [blame] | 216 | MUL_IMM, |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 217 | |
Eric Christopher | 71c6753 | 2009-07-29 00:28:05 +0000 | [diff] [blame] | 218 | // PTEST - Vector bitwise comparisons |
Dan Gohman | d6708ea | 2009-08-15 01:38:56 +0000 | [diff] [blame] | 219 | PTEST, |
| 220 | |
Bruno Cardoso Lopes | 045573c | 2010-08-10 23:25:42 +0000 | [diff] [blame] | 221 | // TESTP - Vector packed fp sign bitwise comparisons |
| 222 | TESTP, |
| 223 | |
Bruno Cardoso Lopes | 3157ef1 | 2010-08-20 22:55:05 +0000 | [diff] [blame] | 224 | // Several flavors of instructions with vector shuffle behaviors. |
| 225 | PALIGN, |
| 226 | PSHUFD, |
| 227 | PSHUFHW, |
| 228 | PSHUFLW, |
| 229 | PSHUFHW_LD, |
| 230 | PSHUFLW_LD, |
| 231 | SHUFPD, |
| 232 | SHUFPS, |
| 233 | MOVDDUP, |
| 234 | MOVSHDUP, |
| 235 | MOVSLDUP, |
| 236 | MOVSHDUP_LD, |
| 237 | MOVSLDUP_LD, |
| 238 | MOVLHPS, |
Bruno Cardoso Lopes | 3157ef1 | 2010-08-20 22:55:05 +0000 | [diff] [blame] | 239 | MOVLHPD, |
Bruno Cardoso Lopes | f2db5b4 | 2010-08-31 21:15:21 +0000 | [diff] [blame] | 240 | MOVHLPS, |
Bruno Cardoso Lopes | 3157ef1 | 2010-08-20 22:55:05 +0000 | [diff] [blame] | 241 | MOVHLPD, |
Bruno Cardoso Lopes | 56098f5 | 2010-09-01 05:08:25 +0000 | [diff] [blame] | 242 | MOVLPS, |
| 243 | MOVLPD, |
Bruno Cardoso Lopes | 3157ef1 | 2010-08-20 22:55:05 +0000 | [diff] [blame] | 244 | MOVSD, |
| 245 | MOVSS, |
| 246 | UNPCKLPS, |
| 247 | UNPCKLPD, |
| 248 | UNPCKHPS, |
| 249 | UNPCKHPD, |
| 250 | PUNPCKLBW, |
| 251 | PUNPCKLWD, |
| 252 | PUNPCKLDQ, |
| 253 | PUNPCKLQDQ, |
| 254 | PUNPCKHBW, |
| 255 | PUNPCKHWD, |
| 256 | PUNPCKHDQ, |
| 257 | PUNPCKHQDQ, |
| 258 | |
Dan Gohman | d6708ea | 2009-08-15 01:38:56 +0000 | [diff] [blame] | 259 | // VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack, |
| 260 | // according to %al. An operator is needed so that this can be expanded |
| 261 | // with control flow. |
Dan Gohman | c76909a | 2009-09-25 20:36:54 +0000 | [diff] [blame] | 262 | VASTART_SAVE_XMM_REGS, |
| 263 | |
Michael J. Spencer | e9c253e | 2010-10-21 01:41:01 +0000 | [diff] [blame] | 264 | // WIN_ALLOCA - Windows's _chkstk call to do stack probing. |
| 265 | WIN_ALLOCA, |
Anton Korobeynikov | 043f3c2 | 2010-03-06 19:32:29 +0000 | [diff] [blame] | 266 | |
Duncan Sands | 59d2dad | 2010-11-20 11:25:00 +0000 | [diff] [blame] | 267 | // Memory barrier |
| 268 | MEMBARRIER, |
| 269 | MFENCE, |
| 270 | SFENCE, |
| 271 | LFENCE, |
| 272 | |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 273 | // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG, |
| 274 | // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG - |
Dan Gohman | c76909a | 2009-09-25 20:36:54 +0000 | [diff] [blame] | 275 | // Atomic 64-bit binary operations. |
| 276 | ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE, |
| 277 | ATOMSUB64_DAG, |
| 278 | ATOMOR64_DAG, |
| 279 | ATOMXOR64_DAG, |
| 280 | ATOMAND64_DAG, |
| 281 | ATOMNAND64_DAG, |
Eric Christopher | 9a9d275 | 2010-07-22 02:48:34 +0000 | [diff] [blame] | 282 | ATOMSWAP64_DAG, |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 283 | |
Chris Lattner | 93c4a5b | 2010-09-21 23:59:42 +0000 | [diff] [blame] | 284 | // LCMPXCHG_DAG, LCMPXCHG8_DAG - Compare and swap. |
| 285 | LCMPXCHG_DAG, |
Chris Lattner | 8864155 | 2010-09-22 00:34:38 +0000 | [diff] [blame] | 286 | LCMPXCHG8_DAG, |
Anton Korobeynikov | 043f3c2 | 2010-03-06 19:32:29 +0000 | [diff] [blame] | 287 | |
Chris Lattner | 8864155 | 2010-09-22 00:34:38 +0000 | [diff] [blame] | 288 | // VZEXT_LOAD - Load, scalar_to_vector, and zero extend. |
Chris Lattner | 0729093 | 2010-09-22 01:05:16 +0000 | [diff] [blame] | 289 | VZEXT_LOAD, |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 290 | |
Chris Lattner | 2156b79 | 2010-09-22 01:11:26 +0000 | [diff] [blame] | 291 | // FNSTCW16m - Store FP control world into i16 memory. |
| 292 | FNSTCW16m, |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 293 | |
Chris Lattner | 0729093 | 2010-09-22 01:05:16 +0000 | [diff] [blame] | 294 | /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the |
| 295 | /// integer destination in memory and a FP reg source. This corresponds |
| 296 | /// to the X86::FIST*m instructions and the rounding mode change stuff. It |
| 297 | /// has two inputs (token chain and address) and two outputs (int value |
| 298 | /// and token chain). |
| 299 | FP_TO_INT16_IN_MEM, |
| 300 | FP_TO_INT32_IN_MEM, |
Chris Lattner | 492a43e | 2010-09-22 01:28:21 +0000 | [diff] [blame] | 301 | FP_TO_INT64_IN_MEM, |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 302 | |
Chris Lattner | 492a43e | 2010-09-22 01:28:21 +0000 | [diff] [blame] | 303 | /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the |
| 304 | /// integer source in memory and FP reg result. This corresponds to the |
| 305 | /// X86::FILD*m instructions. It has three inputs (token chain, address, |
| 306 | /// and source type) and two outputs (FP value and token chain). FILD_FLAG |
| 307 | /// also produces a flag). |
| 308 | FILD, |
| 309 | FILD_FLAG, |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 310 | |
Chris Lattner | 492a43e | 2010-09-22 01:28:21 +0000 | [diff] [blame] | 311 | /// FLD - This instruction implements an extending load to FP stack slots. |
| 312 | /// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain |
| 313 | /// operand, ptr to load from, and a ValueType node indicating the type |
| 314 | /// to load to. |
| 315 | FLD, |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 316 | |
Chris Lattner | 492a43e | 2010-09-22 01:28:21 +0000 | [diff] [blame] | 317 | /// FST - This instruction implements a truncating store to FP stack |
| 318 | /// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a |
| 319 | /// chain operand, value to store, address, and a ValueType to store it |
| 320 | /// as. |
Dan Gohman | 320afb8 | 2010-10-12 18:00:49 +0000 | [diff] [blame] | 321 | FST, |
| 322 | |
| 323 | /// VAARG_64 - This instruction grabs the address of the next argument |
| 324 | /// from a va_list. (reads and modifies the va_list in memory) |
| 325 | VAARG_64 |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 326 | |
Anton Korobeynikov | 043f3c2 | 2010-03-06 19:32:29 +0000 | [diff] [blame] | 327 | // WARNING: Do not add anything in the end unless you want the node to |
| 328 | // have memop! In fact, starting from ATOMADD64_DAG all opcodes will be |
| 329 | // thought as target memory ops! |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 330 | }; |
| 331 | } |
| 332 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 333 | /// Define some predicates that are used for node matching. |
| 334 | namespace X86 { |
| 335 | /// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand |
| 336 | /// specifies a shuffle of elements that is suitable for input to PSHUFD. |
Nate Begeman | 9008ca6 | 2009-04-27 18:41:29 +0000 | [diff] [blame] | 337 | bool isPSHUFDMask(ShuffleVectorSDNode *N); |
Evan Cheng | 0188ecb | 2006-03-22 18:59:22 +0000 | [diff] [blame] | 338 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 339 | /// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand |
| 340 | /// specifies a shuffle of elements that is suitable for input to PSHUFD. |
Nate Begeman | 9008ca6 | 2009-04-27 18:41:29 +0000 | [diff] [blame] | 341 | bool isPSHUFHWMask(ShuffleVectorSDNode *N); |
Evan Cheng | 506d3df | 2006-03-29 23:07:14 +0000 | [diff] [blame] | 342 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 343 | /// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand |
| 344 | /// specifies a shuffle of elements that is suitable for input to PSHUFD. |
Nate Begeman | 9008ca6 | 2009-04-27 18:41:29 +0000 | [diff] [blame] | 345 | bool isPSHUFLWMask(ShuffleVectorSDNode *N); |
Evan Cheng | 506d3df | 2006-03-29 23:07:14 +0000 | [diff] [blame] | 346 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 347 | /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand |
| 348 | /// specifies a shuffle of elements that is suitable for input to SHUFP*. |
Nate Begeman | 9008ca6 | 2009-04-27 18:41:29 +0000 | [diff] [blame] | 349 | bool isSHUFPMask(ShuffleVectorSDNode *N); |
Evan Cheng | 14aed5e | 2006-03-24 01:18:28 +0000 | [diff] [blame] | 350 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 351 | /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand |
| 352 | /// specifies a shuffle of elements that is suitable for input to MOVHLPS. |
Nate Begeman | 9008ca6 | 2009-04-27 18:41:29 +0000 | [diff] [blame] | 353 | bool isMOVHLPSMask(ShuffleVectorSDNode *N); |
Evan Cheng | 2c0dbd0 | 2006-03-24 02:58:06 +0000 | [diff] [blame] | 354 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 355 | /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form |
| 356 | /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, |
| 357 | /// <2, 3, 2, 3> |
Nate Begeman | 9008ca6 | 2009-04-27 18:41:29 +0000 | [diff] [blame] | 358 | bool isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N); |
Evan Cheng | 6e56e2c | 2006-11-07 22:14:24 +0000 | [diff] [blame] | 359 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 360 | /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand |
Nate Begeman | 9008ca6 | 2009-04-27 18:41:29 +0000 | [diff] [blame] | 361 | /// specifies a shuffle of elements that is suitable for MOVLP{S|D}. |
| 362 | bool isMOVLPMask(ShuffleVectorSDNode *N); |
Evan Cheng | 5ced1d8 | 2006-04-06 23:23:56 +0000 | [diff] [blame] | 363 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 364 | /// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand |
Nate Begeman | 9008ca6 | 2009-04-27 18:41:29 +0000 | [diff] [blame] | 365 | /// specifies a shuffle of elements that is suitable for MOVHP{S|D}. |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 366 | /// as well as MOVLHPS. |
Nate Begeman | 0b10b91 | 2009-11-07 23:17:15 +0000 | [diff] [blame] | 367 | bool isMOVLHPSMask(ShuffleVectorSDNode *N); |
Evan Cheng | 5ced1d8 | 2006-04-06 23:23:56 +0000 | [diff] [blame] | 368 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 369 | /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand |
| 370 | /// specifies a shuffle of elements that is suitable for input to UNPCKL. |
Nate Begeman | 9008ca6 | 2009-04-27 18:41:29 +0000 | [diff] [blame] | 371 | bool isUNPCKLMask(ShuffleVectorSDNode *N, bool V2IsSplat = false); |
Evan Cheng | 0038e59 | 2006-03-28 00:39:58 +0000 | [diff] [blame] | 372 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 373 | /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand |
| 374 | /// specifies a shuffle of elements that is suitable for input to UNPCKH. |
Nate Begeman | 9008ca6 | 2009-04-27 18:41:29 +0000 | [diff] [blame] | 375 | bool isUNPCKHMask(ShuffleVectorSDNode *N, bool V2IsSplat = false); |
Evan Cheng | 4fcb922 | 2006-03-28 02:43:26 +0000 | [diff] [blame] | 376 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 377 | /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form |
| 378 | /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, |
| 379 | /// <0, 0, 1, 1> |
Nate Begeman | 9008ca6 | 2009-04-27 18:41:29 +0000 | [diff] [blame] | 380 | bool isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N); |
Evan Cheng | 1d5a8cc | 2006-04-05 07:20:06 +0000 | [diff] [blame] | 381 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 382 | /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form |
| 383 | /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, |
| 384 | /// <2, 2, 3, 3> |
Nate Begeman | 9008ca6 | 2009-04-27 18:41:29 +0000 | [diff] [blame] | 385 | bool isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N); |
Bill Wendling | 2f9bb1a | 2007-04-24 21:16:55 +0000 | [diff] [blame] | 386 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 387 | /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand |
| 388 | /// specifies a shuffle of elements that is suitable for input to MOVSS, |
| 389 | /// MOVSD, and MOVD, i.e. setting the lowest element. |
Nate Begeman | 9008ca6 | 2009-04-27 18:41:29 +0000 | [diff] [blame] | 390 | bool isMOVLMask(ShuffleVectorSDNode *N); |
Evan Cheng | d6d1cbd | 2006-04-11 00:19:04 +0000 | [diff] [blame] | 391 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 392 | /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand |
| 393 | /// specifies a shuffle of elements that is suitable for input to MOVSHDUP. |
Nate Begeman | 9008ca6 | 2009-04-27 18:41:29 +0000 | [diff] [blame] | 394 | bool isMOVSHDUPMask(ShuffleVectorSDNode *N); |
Evan Cheng | d953947 | 2006-04-14 21:59:03 +0000 | [diff] [blame] | 395 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 396 | /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand |
| 397 | /// specifies a shuffle of elements that is suitable for input to MOVSLDUP. |
Nate Begeman | 9008ca6 | 2009-04-27 18:41:29 +0000 | [diff] [blame] | 398 | bool isMOVSLDUPMask(ShuffleVectorSDNode *N); |
Evan Cheng | f686d9b | 2006-10-27 21:08:32 +0000 | [diff] [blame] | 399 | |
Evan Cheng | 0b457f0 | 2008-09-25 20:50:48 +0000 | [diff] [blame] | 400 | /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand |
| 401 | /// specifies a shuffle of elements that is suitable for input to MOVDDUP. |
Nate Begeman | 9008ca6 | 2009-04-27 18:41:29 +0000 | [diff] [blame] | 402 | bool isMOVDDUPMask(ShuffleVectorSDNode *N); |
Evan Cheng | 0b457f0 | 2008-09-25 20:50:48 +0000 | [diff] [blame] | 403 | |
Nate Begeman | a09008b | 2009-10-19 02:17:23 +0000 | [diff] [blame] | 404 | /// isPALIGNRMask - Return true if the specified VECTOR_SHUFFLE operand |
| 405 | /// specifies a shuffle of elements that is suitable for input to PALIGNR. |
| 406 | bool isPALIGNRMask(ShuffleVectorSDNode *N); |
| 407 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 408 | /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle |
| 409 | /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP* |
| 410 | /// instructions. |
| 411 | unsigned getShuffleSHUFImmediate(SDNode *N); |
Evan Cheng | 506d3df | 2006-03-29 23:07:14 +0000 | [diff] [blame] | 412 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 413 | /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle |
Nate Begeman | a09008b | 2009-10-19 02:17:23 +0000 | [diff] [blame] | 414 | /// the specified VECTOR_SHUFFLE mask with PSHUFHW instruction. |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 415 | unsigned getShufflePSHUFHWImmediate(SDNode *N); |
Evan Cheng | 506d3df | 2006-03-29 23:07:14 +0000 | [diff] [blame] | 416 | |
Nate Begeman | a09008b | 2009-10-19 02:17:23 +0000 | [diff] [blame] | 417 | /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle |
| 418 | /// the specified VECTOR_SHUFFLE mask with PSHUFLW instruction. |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 419 | unsigned getShufflePSHUFLWImmediate(SDNode *N); |
Evan Cheng | 37b7387 | 2009-07-30 08:33:02 +0000 | [diff] [blame] | 420 | |
Nate Begeman | a09008b | 2009-10-19 02:17:23 +0000 | [diff] [blame] | 421 | /// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle |
| 422 | /// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction. |
| 423 | unsigned getShufflePALIGNRImmediate(SDNode *N); |
| 424 | |
Evan Cheng | 37b7387 | 2009-07-30 08:33:02 +0000 | [diff] [blame] | 425 | /// isZeroNode - Returns true if Elt is a constant zero or a floating point |
| 426 | /// constant +0.0. |
| 427 | bool isZeroNode(SDValue Elt); |
Anton Korobeynikov | b5e0172 | 2009-08-05 23:01:26 +0000 | [diff] [blame] | 428 | |
| 429 | /// isOffsetSuitableForCodeModel - Returns true of the given offset can be |
| 430 | /// fit into displacement field of the instruction. |
| 431 | bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M, |
| 432 | bool hasSymbolicDisplacement = true); |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 433 | } |
| 434 | |
Chris Lattner | 9189777 | 2006-10-18 18:26:48 +0000 | [diff] [blame] | 435 | //===--------------------------------------------------------------------===// |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 436 | // X86TargetLowering - X86 Implementation of the TargetLowering interface |
| 437 | class X86TargetLowering : public TargetLowering { |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 438 | public: |
Dan Gohman | c9f5f3f | 2008-05-14 01:58:56 +0000 | [diff] [blame] | 439 | explicit X86TargetLowering(X86TargetMachine &TM); |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 440 | |
Chris Lattner | c64daab | 2010-01-26 05:02:42 +0000 | [diff] [blame] | 441 | virtual unsigned getJumpTableEncoding() const; |
Chris Lattner | 5e1df8d | 2010-01-25 23:38:14 +0000 | [diff] [blame] | 442 | |
Chris Lattner | c64daab | 2010-01-26 05:02:42 +0000 | [diff] [blame] | 443 | virtual const MCExpr * |
| 444 | LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, |
| 445 | const MachineBasicBlock *MBB, unsigned uid, |
| 446 | MCContext &Ctx) const; |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 447 | |
Evan Cheng | cc41586 | 2007-11-09 01:32:10 +0000 | [diff] [blame] | 448 | /// getPICJumpTableRelocaBase - Returns relocation base for the given PIC |
| 449 | /// jumptable. |
Chris Lattner | c64daab | 2010-01-26 05:02:42 +0000 | [diff] [blame] | 450 | virtual SDValue getPICJumpTableRelocBase(SDValue Table, |
| 451 | SelectionDAG &DAG) const; |
Chris Lattner | 589c6f6 | 2010-01-26 06:28:43 +0000 | [diff] [blame] | 452 | virtual const MCExpr * |
| 453 | getPICJumpTableRelocBaseExpr(const MachineFunction *MF, |
| 454 | unsigned JTI, MCContext &Ctx) const; |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 455 | |
Chris Lattner | 54e3efd | 2007-02-26 04:01:25 +0000 | [diff] [blame] | 456 | /// getStackPtrReg - Return the stack pointer register we are using: either |
| 457 | /// ESP or RSP. |
| 458 | unsigned getStackPtrReg() const { return X86StackPtr; } |
Evan Cheng | 2928650 | 2008-01-23 23:17:41 +0000 | [diff] [blame] | 459 | |
    /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area. For X86, aggregates
    /// that contain SSE vectors are placed at 16-byte boundaries while the
    /// rest are at 4-byte boundaries.
| 464 | virtual unsigned getByValTypeAlignment(const Type *Ty) const; |
Evan Cheng | f0df031 | 2008-05-15 08:39:06 +0000 | [diff] [blame] | 465 | |
    /// getOptimalMemOpType - Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, the destination alignment can satisfy
    /// any constraint and does not need to be checked. Similarly, if SrcAlign
    /// is zero there is no need to check it against an alignment requirement,
    /// probably because the source does not need to be loaded. If
    /// 'NonScalarIntSafe' is true, it is safe to return a non-scalar-integer
    /// type, e.g. for an empty string source, a constant, or a value loaded
    /// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
    /// constant so it does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
Evan Cheng | f28f8bc | 2010-04-02 19:36:14 +0000 | [diff] [blame] | 478 | virtual EVT |
Evan Cheng | c3b0c34 | 2010-04-08 07:37:57 +0000 | [diff] [blame] | 479 | getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, |
| 480 | bool NonScalarIntSafe, bool MemcpyStrSrc, |
Dan Gohman | 37f32ee | 2010-04-16 20:11:05 +0000 | [diff] [blame] | 481 | MachineFunction &MF) const; |
Bill Wendling | af56634 | 2009-08-15 21:21:19 +0000 | [diff] [blame] | 482 | |
| 483 | /// allowsUnalignedMemoryAccesses - Returns true if the target allows |
| 484 | /// unaligned memory accesses. of the specified type. |
| 485 | virtual bool allowsUnalignedMemoryAccesses(EVT VT) const { |
| 486 | return true; |
| 487 | } |
Bill Wendling | 20c568f | 2009-06-30 22:38:32 +0000 | [diff] [blame] | 488 | |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 489 | /// LowerOperation - Provide custom lowering hooks for some operations. |
| 490 | /// |
Dan Gohman | d858e90 | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 491 | virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const; |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 492 | |
Duncan Sands | 1607f05 | 2008-12-01 11:39:25 +0000 | [diff] [blame] | 493 | /// ReplaceNodeResults - Replace the results of node with an illegal result |
| 494 | /// type with new values built out of custom code. |
Chris Lattner | 27a6c73 | 2007-11-24 07:07:01 +0000 | [diff] [blame] | 495 | /// |
Duncan Sands | 1607f05 | 2008-12-01 11:39:25 +0000 | [diff] [blame] | 496 | virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results, |
Dan Gohman | d858e90 | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 497 | SelectionDAG &DAG) const; |
Chris Lattner | 27a6c73 | 2007-11-24 07:07:01 +0000 | [diff] [blame] | 498 | |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 499 | |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 500 | virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; |
Evan Cheng | 206ee9d | 2006-07-07 08:33:52 +0000 | [diff] [blame] | 501 | |
Evan Cheng | e5b51ac | 2010-04-17 06:13:15 +0000 | [diff] [blame] | 502 | /// isTypeDesirableForOp - Return true if the target has native support for |
| 503 | /// the specified value type and it is 'desirable' to use the type for the |
| 504 | /// given node type. e.g. On x86 i16 is legal, but undesirable since i16 |
| 505 | /// instruction encodings are longer and some i16 instructions are slow. |
| 506 | virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const; |
| 507 | |
    /// IsDesirableToPromoteOp - Return true if it is profitable to promote
    /// the given operation to a wider type, returning the desired promoted
    /// type in PVT. e.g. On x86 i16 is legal, but undesirable since i16
    /// instruction encodings are longer and some i16 instructions are slow.
| 512 | virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const; |
Evan Cheng | 64b7bf7 | 2010-04-16 06:14:10 +0000 | [diff] [blame] | 513 | |
Dan Gohman | af1d8ca | 2010-05-01 00:01:06 +0000 | [diff] [blame] | 514 | virtual MachineBasicBlock * |
| 515 | EmitInstrWithCustomInserter(MachineInstr *MI, |
| 516 | MachineBasicBlock *MBB) const; |
Evan Cheng | 4a46080 | 2006-01-11 00:33:36 +0000 | [diff] [blame] | 517 | |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 518 | |
Evan Cheng | 7226158 | 2005-12-20 06:22:03 +0000 | [diff] [blame] | 519 | /// getTargetNodeName - This method returns the name of a target specific |
| 520 | /// DAG node. |
| 521 | virtual const char *getTargetNodeName(unsigned Opcode) const; |
| 522 | |
Scott Michel | 5b8f82e | 2008-03-10 15:42:14 +0000 | [diff] [blame] | 523 | /// getSetCCResultType - Return the ISD::SETCC ValueType |
Owen Anderson | 825b72b | 2009-08-11 20:47:22 +0000 | [diff] [blame] | 524 | virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const; |
Scott Michel | 5b8f82e | 2008-03-10 15:42:14 +0000 | [diff] [blame] | 525 | |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 526 | /// computeMaskedBitsForTargetNode - Determine which of the bits specified |
| 527 | /// in Mask are known to be either zero or one and return them in the |
Nate Begeman | 368e18d | 2006-02-16 21:11:51 +0000 | [diff] [blame] | 528 | /// KnownZero/KnownOne bitsets. |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 529 | virtual void computeMaskedBitsForTargetNode(const SDValue Op, |
Dan Gohman | 977a76f | 2008-02-13 22:28:48 +0000 | [diff] [blame] | 530 | const APInt &Mask, |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 531 | APInt &KnownZero, |
Dan Gohman | fd29e0e | 2008-02-13 00:35:47 +0000 | [diff] [blame] | 532 | APInt &KnownOne, |
Dan Gohman | ea859be | 2007-06-22 14:59:07 +0000 | [diff] [blame] | 533 | const SelectionDAG &DAG, |
Nate Begeman | 368e18d | 2006-02-16 21:11:51 +0000 | [diff] [blame] | 534 | unsigned Depth = 0) const; |
Evan Cheng | ad4196b | 2008-05-12 19:56:52 +0000 | [diff] [blame] | 535 | |
Owen Anderson | bc146b0 | 2010-09-21 20:42:50 +0000 | [diff] [blame] | 536 | // ComputeNumSignBitsForTargetNode - Determine the number of bits in the |
| 537 | // operation that are sign bits. |
| 538 | virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, |
| 539 | unsigned Depth) const; |
| 540 | |
Evan Cheng | ad4196b | 2008-05-12 19:56:52 +0000 | [diff] [blame] | 541 | virtual bool |
Dan Gohman | 46510a7 | 2010-04-15 01:51:59 +0000 | [diff] [blame] | 542 | isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const; |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 543 | |
Dan Gohman | d858e90 | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 544 | SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const; |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 545 | |
Chris Lattner | b810565 | 2009-07-20 17:51:36 +0000 | [diff] [blame] | 546 | virtual bool ExpandInlineAsm(CallInst *CI) const; |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 547 | |
Chris Lattner | 4234f57 | 2007-03-25 02:14:49 +0000 | [diff] [blame] | 548 | ConstraintType getConstraintType(const std::string &Constraint) const; |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 549 | |
John Thompson | 44ab89e | 2010-10-29 17:29:13 +0000 | [diff] [blame] | 550 | /// Examine constraint string and operand type and determine a weight value. |
John Thompson | eac6e1d | 2010-09-13 18:15:37 +0000 | [diff] [blame] | 551 | /// The operand object must already have been set up with the operand type. |
John Thompson | 44ab89e | 2010-10-29 17:29:13 +0000 | [diff] [blame] | 552 | virtual ConstraintWeight getSingleConstraintMatchWeight( |
John Thompson | eac6e1d | 2010-09-13 18:15:37 +0000 | [diff] [blame] | 553 | AsmOperandInfo &info, const char *constraint) const; |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 554 | |
| 555 | std::vector<unsigned> |
Chris Lattner | 1efa40f | 2006-02-22 00:56:39 +0000 | [diff] [blame] | 556 | getRegClassForInlineAsmConstraint(const std::string &Constraint, |
Owen Anderson | e50ed30 | 2009-08-10 22:56:29 +0000 | [diff] [blame] | 557 | EVT VT) const; |
Chris Lattner | 48884cd | 2007-08-25 00:47:38 +0000 | [diff] [blame] | 558 | |
Owen Anderson | e50ed30 | 2009-08-10 22:56:29 +0000 | [diff] [blame] | 559 | virtual const char *LowerXConstraint(EVT ConstraintVT) const; |
Dale Johannesen | ba2a0b9 | 2008-01-29 02:21:21 +0000 | [diff] [blame] | 560 | |
Chris Lattner | 48884cd | 2007-08-25 00:47:38 +0000 | [diff] [blame] | 561 | /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops |
Evan Cheng | da43bcf | 2008-09-24 00:05:32 +0000 | [diff] [blame] | 562 | /// vector. If it is invalid, don't add anything to Ops. If hasMemory is |
| 563 | /// true it means one of the asm constraint of the inline asm instruction |
| 564 | /// being processed is 'm'. |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 565 | virtual void LowerAsmOperandForConstraint(SDValue Op, |
Chris Lattner | 48884cd | 2007-08-25 00:47:38 +0000 | [diff] [blame] | 566 | char ConstraintLetter, |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 567 | std::vector<SDValue> &Ops, |
Chris Lattner | 5e76423 | 2008-04-26 23:02:14 +0000 | [diff] [blame] | 568 | SelectionDAG &DAG) const; |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 569 | |
Chris Lattner | 9189777 | 2006-10-18 18:26:48 +0000 | [diff] [blame] | 570 | /// getRegForInlineAsmConstraint - Given a physical register constraint |
| 571 | /// (e.g. {edx}), return the register number and the register class for the |
| 572 | /// register. This should only be used for C_Register constraints. On |
| 573 | /// error, this returns a register number of 0. |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 574 | std::pair<unsigned, const TargetRegisterClass*> |
Chris Lattner | f76d180 | 2006-07-31 23:26:50 +0000 | [diff] [blame] | 575 | getRegForInlineAsmConstraint(const std::string &Constraint, |
Owen Anderson | e50ed30 | 2009-08-10 22:56:29 +0000 | [diff] [blame] | 576 | EVT VT) const; |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 577 | |
Chris Lattner | c9addb7 | 2007-03-30 23:15:24 +0000 | [diff] [blame] | 578 | /// isLegalAddressingMode - Return true if the addressing mode represented |
| 579 | /// by AM is legal for this target, for a load/store of the specified type. |
| 580 | virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const; |
| 581 | |
Evan Cheng | 2bd122c | 2007-10-26 01:56:11 +0000 | [diff] [blame] | 582 | /// isTruncateFree - Return true if it's free to truncate a value of |
| 583 | /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in |
| 584 | /// register EAX to i16 by referencing its sub-register AX. |
| 585 | virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const; |
Owen Anderson | e50ed30 | 2009-08-10 22:56:29 +0000 | [diff] [blame] | 586 | virtual bool isTruncateFree(EVT VT1, EVT VT2) const; |
Dan Gohman | 97121ba | 2009-04-08 00:15:30 +0000 | [diff] [blame] | 587 | |
| 588 | /// isZExtFree - Return true if any actual instruction that defines a |
| 589 | /// value of type Ty1 implicit zero-extends the value to Ty2 in the result |
| 590 | /// register. This does not necessarily include registers defined in |
| 591 | /// unknown ways, such as incoming arguments, or copies from unknown |
| 592 | /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this |
| 593 | /// does not necessarily apply to truncate instructions. e.g. on x86-64, |
| 594 | /// all instructions that define 32-bit values implicit zero-extend the |
| 595 | /// result out to 64 bits. |
| 596 | virtual bool isZExtFree(const Type *Ty1, const Type *Ty2) const; |
Owen Anderson | e50ed30 | 2009-08-10 22:56:29 +0000 | [diff] [blame] | 597 | virtual bool isZExtFree(EVT VT1, EVT VT2) const; |
Dan Gohman | 97121ba | 2009-04-08 00:15:30 +0000 | [diff] [blame] | 598 | |
Evan Cheng | 8b944d3 | 2009-05-28 00:35:15 +0000 | [diff] [blame] | 599 | /// isNarrowingProfitable - Return true if it's profitable to narrow |
| 600 | /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow |
| 601 | /// from i32 to i8 but not from i32 to i16. |
Owen Anderson | e50ed30 | 2009-08-10 22:56:29 +0000 | [diff] [blame] | 602 | virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const; |
Evan Cheng | 8b944d3 | 2009-05-28 00:35:15 +0000 | [diff] [blame] | 603 | |
Evan Cheng | eb2f969 | 2009-10-27 19:56:55 +0000 | [diff] [blame] | 604 | /// isFPImmLegal - Returns true if the target can instruction select the |
| 605 | /// specified FP immediate natively. If false, the legalizer will |
| 606 | /// materialize the FP immediate as a load from a constant pool. |
Evan Cheng | a1eaa3c | 2009-10-28 01:43:28 +0000 | [diff] [blame] | 607 | virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const; |
Evan Cheng | eb2f969 | 2009-10-27 19:56:55 +0000 | [diff] [blame] | 608 | |
Evan Cheng | 0188ecb | 2006-03-22 18:59:22 +0000 | [diff] [blame] | 609 | /// isShuffleMaskLegal - Targets can use this to indicate that they only |
| 610 | /// support *some* VECTOR_SHUFFLE operations, those with specific masks. |
Chris Lattner | 9189777 | 2006-10-18 18:26:48 +0000 | [diff] [blame] | 611 | /// By default, if a target supports the VECTOR_SHUFFLE node, all mask |
| 612 | /// values are assumed to be legal. |
Nate Begeman | 5a5ca15 | 2009-04-29 05:20:52 +0000 | [diff] [blame] | 613 | virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask, |
Owen Anderson | e50ed30 | 2009-08-10 22:56:29 +0000 | [diff] [blame] | 614 | EVT VT) const; |
Evan Cheng | 39623da | 2006-04-20 08:58:49 +0000 | [diff] [blame] | 615 | |
    /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can
    /// use this to indicate if there is a suitable VECTOR_SHUFFLE that can
    /// be used to replace a VAND with a constant pool entry.
Nate Begeman | 5a5ca15 | 2009-04-29 05:20:52 +0000 | [diff] [blame] | 620 | virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask, |
Owen Anderson | e50ed30 | 2009-08-10 22:56:29 +0000 | [diff] [blame] | 621 | EVT VT) const; |
Evan Cheng | 6fd599f | 2008-03-05 01:30:59 +0000 | [diff] [blame] | 622 | |
| 623 | /// ShouldShrinkFPConstant - If true, then instruction selection should |
| 624 | /// seek to shrink the FP constant of the specified type to a smaller type |
| 625 | /// in order to save space and / or reduce runtime. |
Owen Anderson | e50ed30 | 2009-08-10 22:56:29 +0000 | [diff] [blame] | 626 | virtual bool ShouldShrinkFPConstant(EVT VT) const { |
Evan Cheng | 6fd599f | 2008-03-05 01:30:59 +0000 | [diff] [blame] | 627 | // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more |
| 628 | // expensive than a straight movsd. On the other hand, it's important to |
| 629 | // shrink long double fp constant since fldt is very slow. |
Owen Anderson | 825b72b | 2009-08-11 20:47:22 +0000 | [diff] [blame] | 630 | return !X86ScalarSSEf64 || VT == MVT::f80; |
Evan Cheng | 6fd599f | 2008-03-05 01:30:59 +0000 | [diff] [blame] | 631 | } |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 632 | |
Dan Gohman | 419e4f9 | 2010-05-11 16:21:03 +0000 | [diff] [blame] | 633 | const X86Subtarget* getSubtarget() const { |
Dan Gohman | 707e018 | 2008-04-12 04:36:06 +0000 | [diff] [blame] | 634 | return Subtarget; |
Rafael Espindola | f1ba1ca | 2007-11-05 23:12:20 +0000 | [diff] [blame] | 635 | } |
| 636 | |
Chris Lattner | 3d66185 | 2008-01-18 06:52:41 +0000 | [diff] [blame] | 637 | /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is |
| 638 | /// computed in an SSE register, not on the X87 floating point stack. |
Owen Anderson | e50ed30 | 2009-08-10 22:56:29 +0000 | [diff] [blame] | 639 | bool isScalarFPTypeInSSEReg(EVT VT) const { |
Owen Anderson | 825b72b | 2009-08-11 20:47:22 +0000 | [diff] [blame] | 640 | return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2 |
| 641 | (VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1 |
Chris Lattner | 3d66185 | 2008-01-18 06:52:41 +0000 | [diff] [blame] | 642 | } |
Dan Gohman | d9f3c48 | 2008-08-19 21:32:53 +0000 | [diff] [blame] | 643 | |
| 644 | /// createFastISel - This method returns a target specific FastISel object, |
| 645 | /// or null if the target does not support "fast" ISel. |
Dan Gohman | a4160c3 | 2010-07-07 16:29:44 +0000 | [diff] [blame] | 646 | virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const; |
Bill Wendling | 20c568f | 2009-06-30 22:38:32 +0000 | [diff] [blame] | 647 | |
Bill Wendling | b4202b8 | 2009-07-01 18:50:55 +0000 | [diff] [blame] | 648 | /// getFunctionAlignment - Return the Log2 alignment of this function. |
Bill Wendling | 20c568f | 2009-06-30 22:38:32 +0000 | [diff] [blame] | 649 | virtual unsigned getFunctionAlignment(const Function *F) const; |
| 650 | |
Evan Cheng | 70017e4 | 2010-07-24 00:39:05 +0000 | [diff] [blame] | 651 | unsigned getRegPressureLimit(const TargetRegisterClass *RC, |
| 652 | MachineFunction &MF) const; |
| 653 | |
Eric Christopher | f7a0c7b | 2010-07-06 05:18:56 +0000 | [diff] [blame] | 654 | /// getStackCookieLocation - Return true if the target stores stack |
| 655 | /// protector cookies at a fixed offset in some non-standard address |
| 656 | /// space, and populates the address space and offset as |
| 657 | /// appropriate. |
| 658 | virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const; |
| 659 | |
Evan Cheng | dee8101 | 2010-07-26 21:50:05 +0000 | [diff] [blame] | 660 | protected: |
| 661 | std::pair<const TargetRegisterClass*, uint8_t> |
| 662 | findRepresentativeClass(EVT VT) const; |
| 663 | |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 664 | private: |
Evan Cheng | 0db9fe6 | 2006-04-25 20:13:52 +0000 | [diff] [blame] | 665 | /// Subtarget - Keep a pointer to the X86Subtarget around so that we can |
| 666 | /// make the right decision when generating code for different targets. |
| 667 | const X86Subtarget *Subtarget; |
Dan Gohman | c9f5f3f | 2008-05-14 01:58:56 +0000 | [diff] [blame] | 668 | const X86RegisterInfo *RegInfo; |
Anton Korobeynikov | bff66b0 | 2008-09-09 18:22:57 +0000 | [diff] [blame] | 669 | const TargetData *TD; |
Evan Cheng | 0db9fe6 | 2006-04-25 20:13:52 +0000 | [diff] [blame] | 670 | |
Evan Cheng | 25ab690 | 2006-09-08 06:48:29 +0000 | [diff] [blame] | 671 | /// X86StackPtr - X86 physical register used as stack ptr. |
| 672 | unsigned X86StackPtr; |
Michael J. Spencer | 6e56b18 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 673 | |
| 674 | /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87 |
Dale Johannesen | f1fc3a8 | 2007-09-23 14:52:20 +0000 | [diff] [blame] | 675 | /// floating point ops. |
| 676 | /// When SSE is available, use it for f32 operations. |
| 677 | /// When SSE2 is available, use it for f64 operations. |
| 678 | bool X86ScalarSSEf32; |
| 679 | bool X86ScalarSSEf64; |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 680 | |
Evan Cheng | eb2f969 | 2009-10-27 19:56:55 +0000 | [diff] [blame] | 681 | /// LegalFPImmediates - A list of legal fp immediates. |
| 682 | std::vector<APFloat> LegalFPImmediates; |
| 683 | |
| 684 | /// addLegalFPImmediate - Indicate that this x86 target can instruction |
| 685 | /// select the specified FP immediate natively. |
| 686 | void addLegalFPImmediate(const APFloat& Imm) { |
| 687 | LegalFPImmediates.push_back(Imm); |
| 688 | } |
| 689 | |
Dan Gohman | 98ca4f2 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 690 | SDValue LowerCallResult(SDValue Chain, SDValue InFlag, |
Sandeep Patel | 65c3c8f | 2009-09-02 08:44:58 +0000 | [diff] [blame] | 691 | CallingConv::ID CallConv, bool isVarArg, |
Dan Gohman | 98ca4f2 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 692 | const SmallVectorImpl<ISD::InputArg> &Ins, |
| 693 | DebugLoc dl, SelectionDAG &DAG, |
Dan Gohman | d858e90 | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 694 | SmallVectorImpl<SDValue> &InVals) const; |
Dan Gohman | 98ca4f2 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 695 | SDValue LowerMemArgument(SDValue Chain, |
Sandeep Patel | 65c3c8f | 2009-09-02 08:44:58 +0000 | [diff] [blame] | 696 | CallingConv::ID CallConv, |
Dan Gohman | 98ca4f2 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 697 | const SmallVectorImpl<ISD::InputArg> &ArgInfo, |
| 698 | DebugLoc dl, SelectionDAG &DAG, |
| 699 | const CCValAssign &VA, MachineFrameInfo *MFI, |
Dan Gohman | d858e90 | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 700 | unsigned i) const; |
Dan Gohman | 98ca4f2 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 701 | SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg, |
| 702 | DebugLoc dl, SelectionDAG &DAG, |
| 703 | const CCValAssign &VA, |
Dan Gohman | d858e90 | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 704 | ISD::ArgFlagsTy Flags) const; |
Rafael Espindola | 1b5dcc3 | 2007-08-31 15:06:30 +0000 | [diff] [blame] | 705 | |
Gordon Henriksen | 8673766 | 2008-01-05 16:56:59 +0000 | [diff] [blame] | 706 | // Call lowering helpers. |
Evan Cheng | 0c439eb | 2010-01-27 00:07:07 +0000 | [diff] [blame] | 707 | |
| 708 | /// IsEligibleForTailCallOptimization - Check whether the call is eligible |
| 709 | /// for tail call optimization. Targets which want to do tail call |
| 710 | /// optimization should implement this function. |
Evan Cheng | 022d9e1 | 2010-02-02 23:55:14 +0000 | [diff] [blame] | 711 | bool IsEligibleForTailCallOptimization(SDValue Callee, |
Evan Cheng | 0c439eb | 2010-01-27 00:07:07 +0000 | [diff] [blame] | 712 | CallingConv::ID CalleeCC, |
| 713 | bool isVarArg, |
Evan Cheng | a375d47 | 2010-03-15 18:54:48 +0000 | [diff] [blame] | 714 | bool isCalleeStructRet, |
| 715 | bool isCallerStructRet, |
Evan Cheng | b171245 | 2010-01-27 06:25:16 +0000 | [diff] [blame] | 716 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
Dan Gohman | c940365 | 2010-07-07 15:54:55 +0000 | [diff] [blame] | 717 | const SmallVectorImpl<SDValue> &OutVals, |
Evan Cheng | b171245 | 2010-01-27 06:25:16 +0000 | [diff] [blame] | 718 | const SmallVectorImpl<ISD::InputArg> &Ins, |
Evan Cheng | 0c439eb | 2010-01-27 00:07:07 +0000 | [diff] [blame] | 719 | SelectionDAG& DAG) const; |
Dan Gohman | d858e90 | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 720 | bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const; |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 721 | SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr, |
| 722 | SDValue Chain, bool IsTailCall, bool Is64Bit, |
Dan Gohman | d858e90 | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 723 | int FPDiff, DebugLoc dl) const; |
Arnold Schwaighofer | 4b5324a | 2008-04-12 18:11:06 +0000 | [diff] [blame] | 724 | |
Dan Gohman | d858e90 | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 725 | unsigned GetAlignedArgumentStackSize(unsigned StackSize, |
| 726 | SelectionDAG &DAG) const; |
Evan Cheng | 559806f | 2006-01-27 08:10:46 +0000 | [diff] [blame] | 727 | |
Eli Friedman | 948e95a | 2009-05-23 09:59:16 +0000 | [diff] [blame] | 728 | std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, |
Dan Gohman | d858e90 | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 729 | bool isSigned) const; |
Evan Cheng | c363094 | 2009-12-09 21:00:30 +0000 | [diff] [blame] | 730 | |
| 731 | SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, |
Dan Gohman | d858e90 | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 732 | SelectionDAG &DAG) const; |
| 733 | SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const; |
| 734 | SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const; |
| 735 | SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const; |
| 736 | SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; |
| 737 | SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const; |
| 738 | SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; |
| 739 | SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const; |
| 740 | SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const; |
| 741 | SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; |
| 742 | SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const; |
Dale Johannesen | 33c960f | 2009-02-04 20:06:27 +0000 | [diff] [blame] | 743 | SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl, |
| 744 | int64_t Offset, SelectionDAG &DAG) const; |
Dan Gohman | d858e90 | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 745 | SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; |
| 746 | SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const; |
| 747 | SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const; |
| 748 | SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const; |
Owen Anderson | e50ed30 | 2009-08-10 22:56:29 +0000 | [diff] [blame] | 749 | SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot, |
Dan Gohman | d858e90 | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 750 | SelectionDAG &DAG) const; |
Wesley Peck | bf17cfa | 2010-11-23 03:31:01 +0000 | [diff] [blame] | 751 | SDValue LowerBITCAST(SDValue op, SelectionDAG &DAG) const; |
Dan Gohman | d858e90 | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 752 | SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const; |
| 753 | SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const; |
| 754 | SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const; |
| 755 | SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const; |
| 756 | SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const; |
| 757 | SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const; |
| 758 | SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const; |
| 759 | SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const; |
| 760 | SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const; |
Evan Cheng | 5528e7b | 2010-04-21 01:47:12 +0000 | [diff] [blame] | 761 | SDValue LowerToBT(SDValue And, ISD::CondCode CC, |
| 762 | DebugLoc dl, SelectionDAG &DAG) const; |
Dan Gohman | d858e90 | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 763 | SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const; |
| 764 | SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const; |
| 765 | SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const; |
| 766 | SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const; |
| 767 | SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG) const; |
| 768 | SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const; |
| 769 | SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const; |
| 770 | SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; |
| 771 | SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const; |
| 772 | SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const; |
| 773 | SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const; |
| 774 | SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const; |
| 775 | SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const; |
| 776 | SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const; |
| 777 | SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const; |
| 778 | SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) const; |
| 779 | SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const; |
| 780 | SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) const; |
| 781 | SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const; |
| 782 | SDValue LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) const; |
Nate Begeman | bdcb5af | 2010-07-27 22:37:06 +0000 | [diff] [blame] | 783 | SDValue LowerSHL(SDValue Op, SelectionDAG &DAG) const; |
Dan Gohman | d858e90 | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 784 | SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const; |
Bill Wendling | 41ea7e7 | 2008-11-24 19:21:46 +0000 | [diff] [blame] | 785 | |
Dan Gohman | d858e90 | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 786 | SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const; |
| 787 | SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const; |
| 788 | SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const; |
Eric Christopher | 9a9d275 | 2010-07-22 02:48:34 +0000 | [diff] [blame] | 789 | SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const; |
Duncan Sands | 1607f05 | 2008-12-01 11:39:25 +0000 | [diff] [blame] | 790 | |
Bruno Cardoso Lopes | bf8154a | 2010-08-21 01:32:18 +0000 | [diff] [blame] | 791 | // Utility functions to help LowerVECTOR_SHUFFLE |
| 792 | SDValue LowerVECTOR_SHUFFLEv8i16(SDValue Op, SelectionDAG &DAG) const; |
| 793 | |
    /// TargetLowering hook: lower the incoming (formal) arguments described
    /// by Ins for the given calling convention, appending the lowered values
    /// to InVals.  Returns the updated chain.
    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    /// TargetLowering hook: lower an outgoing call.  Outs/OutVals describe
    /// the arguments, Ins describes the values returned by the call (placed
    /// into InVals).  isTailCall is by reference: the implementation may
    /// clear it if the call cannot actually be lowered as a tail call.
    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals) const;

    /// TargetLowering hook: lower a function return; Outs/OutVals describe
    /// the values being returned.
    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    /// Return true if the value of node N is only used by return
    /// instructions (used by tail-call/return optimizations in the base
    /// class -- see TargetLowering for the exact contract).
    virtual bool isUsedByReturnOnly(SDNode *N) const;

    /// Return true if the return values described by Outs can be lowered
    /// under CallConv (i.e. they fit in the convention's return registers).
    virtual bool
      CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                     LLVMContext &Context) const;

    /// Replace a 64-bit atomic binary node N with the X86-specific node
    /// NewOp, appending the replacement values to Results (used from
    /// ReplaceNodeResults-style type legalization).
    void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                 SelectionDAG &DAG, unsigned NewOp) const;
Duncan Sands | 1607f05 | 2008-12-01 11:39:25 +0000 | [diff] [blame] | 825 | |
    /// Utility function to emit string processing sse4.2 instructions
    /// that return in xmm0.
    /// This takes the instruction to expand, the associated machine basic
    /// block, the number of args, and whether or not the second arg is
    /// in memory or not.
    MachineBasicBlock *EmitPCMP(MachineInstr *BInstr, MachineBasicBlock *BB,
                                unsigned argNum, bool inMem) const;

    /// Utility functions to emit monitor and mwait instructions. These
    /// need to make sure that the arguments to the intrinsic are in the
    /// correct registers.
    MachineBasicBlock *EmitMonitor(MachineInstr *MI,
                                   MachineBasicBlock *BB) const;
    MachineBasicBlock *EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const;

    /// Utility function to emit atomic bitwise operations (and, or, xor).
    /// It takes the bitwise instruction to expand, the associated machine basic
    /// block, and the associated X86 opcodes for reg/reg and reg/imm.
    /// loadOpc/cxchgOpc/notOpc/EAXreg parameterize the load + compare-exchange
    /// retry loop; invSrc requests inverting the source operand (for the
    /// NAND-style forms -- confirm against the .cpp).
    MachineBasicBlock *EmitAtomicBitwiseWithCustomInserter(
                                                    MachineInstr *BInstr,
                                                    MachineBasicBlock *BB,
                                                    unsigned regOpc,
                                                    unsigned immOpc,
                                                    unsigned loadOpc,
                                                    unsigned cxchgOpc,
                                                    unsigned notOpc,
                                                    unsigned EAXreg,
                                                    TargetRegisterClass *RC,
                                                    bool invSrc = false) const;

    /// Same as above, but for 64-bit atomics split into lo/hi 32-bit halves
    /// (separate reg/imm opcodes for the low and high words).
    MachineBasicBlock *EmitAtomicBit6432WithCustomInserter(
                                                    MachineInstr *BInstr,
                                                    MachineBasicBlock *BB,
                                                    unsigned regOpcL,
                                                    unsigned regOpcH,
                                                    unsigned immOpcL,
                                                    unsigned immOpcH,
                                                    bool invSrc = false) const;

    /// Utility function to emit atomic min and max.  It takes the min/max
    /// instruction to expand, the associated basic block, and the associated
    /// cmov opcode for moving the min or max value.
    MachineBasicBlock *EmitAtomicMinMaxWithCustomInserter(MachineInstr *BInstr,
                                                          MachineBasicBlock *BB,
                                                        unsigned cmovOpc) const;

    // Utility function to emit the low-level va_arg code for X86-64.
    MachineBasicBlock *EmitVAARG64WithCustomInserter(
                       MachineInstr *MI,
                       MachineBasicBlock *MBB) const;

    /// Utility function to emit the xmm reg save portion of va_start.
    MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
                                                   MachineInstr *BInstr,
                                                   MachineBasicBlock *BB) const;

    /// Custom inserter for the pseudo select instruction (expands it into
    /// control flow / cmov -- see the definition for the exact expansion).
    MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
                                         MachineBasicBlock *BB) const;

    /// Custom inserter for the Windows stack-probe alloca pseudo.
    MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI,
                                              MachineBasicBlock *BB) const;

    /// Custom inserter for the TLS call pseudo instruction.
    MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    /// Custom inserter for the TLS address pseudo instruction.
    MachineBasicBlock *emitLoweredTLSAddr(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    /// Emit nodes that will be selected as "test Op0,Op0", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;

    /// Emit nodes that will be selected as "cmp Op0,Op1", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                    SelectionDAG &DAG) const;
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 902 | }; |
Evan Cheng | c3f44b0 | 2008-09-03 00:03:49 +0000 | [diff] [blame] | 903 | |
  namespace X86 {
    /// Create an X86-specific FastISel instance for the function described
    /// by funcInfo (entry point used by the FastISel framework).
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
  }
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 907 | } |
| 908 | |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 909 | #endif // X86ISELLOWERING_H |