//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef X86ISELLOWERING_H
#define X86ISELLOWERING_H

#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetTransformImpl.h"

namespace llvm {
  namespace X86ISD {
    // X86 Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// BSF - Bit scan forward.
      /// BSR - Bit scan reverse.
      BSF,
      BSR,

      /// SHLD, SHRD - Double shift instructions. These correspond to
      /// X86::SHLDxx and X86::SHRDxx instructions.
      SHLD,
      SHRD,

      /// FAND - Bitwise logical AND of floating point values. This corresponds
      /// to X86::ANDPS or X86::ANDPD.
      FAND,

      /// FOR - Bitwise logical OR of floating point values. This corresponds
      /// to X86::ORPS or X86::ORPD.
      FOR,

      /// FXOR - Bitwise logical XOR of floating point values. This corresponds
      /// to X86::XORPS or X86::XORPD.
      FXOR,

      /// FSRL - Bitwise logical right shift of floating point values. This
      /// corresponds to X86::PSRLDQ.
      FSRL,

      /// CALL - These operations represent an abstract X86 call
      /// instruction, which includes a bunch of information. In particular the
      /// operands of this node are:
      ///
      /// #0 - The incoming token chain
      /// #1 - The callee
      /// #2 - The number of arg bytes the caller pushes on the stack.
      /// #3 - The number of arg bytes the callee pops off the stack.
      /// #4 - The value to pass in AL/AX/EAX (optional)
      /// #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      /// #0 - The outgoing token chain
      /// #1 - The first register result value (optional)
      /// #2 - The second register result value (optional)
      ///
      CALL,

      /// RDTSC_DAG - This operation implements the lowering for
      /// readcyclecounter.
      RDTSC_DAG,

      /// X86 compare and logical compare instructions.
      CMP, COMI, UCOMI,

      /// X86 bit-test instructions.
      BT,

      /// X86 SetCC. Operand 0 is condition code, and operand 1 is the EFLAGS
      /// operand, usually produced by a CMP instruction.
      SETCC,

      // Same as SETCC except it's materialized with a sbb and the value is all
      // ones or all zeros.
      SETCC_CARRY,  // R = carry_bit ? ~0 : 0

      /// X86 FP SETCC, implemented with CMP{cc}SS/CMP{cc}SD.
      /// Operands are two FP values to compare; result is a mask of
      /// 0s or 1s. Generally DTRT for C/C++ with NaNs.
      FSETCCss, FSETCCsd,

      /// X86 MOVMSK{pd|ps}, extracts sign bits of two or four FP values,
      /// result in an integer GPR. Needs masking for scalar result.
      FGETSIGNx86,

      /// X86 conditional moves. Operand 0 and operand 1 are the two values
      /// to select from. Operand 2 is the condition code, and operand 3 is the
      /// flag operand produced by a CMP or TEST instruction. It also writes a
      /// flag result.
      CMOV,

      /// X86 conditional branches. Operand 0 is the chain operand, operand 1
      /// is the block to branch if condition is true, operand 2 is the
      /// condition code, and operand 3 is the flag operand produced by a CMP
      /// or TEST instruction.
      BRCOND,

      /// Return with a flag operand. Operand 0 is the chain operand, operand
      /// 1 is the number of bytes of stack to pop.
      RET_FLAG,

      /// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx.
      REP_STOS,

      /// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.
      REP_MOVS,

      /// GlobalBaseReg - On Darwin, this node represents the result of the popl
      /// at function entry, used for PIC code.
      GlobalBaseReg,

      /// Wrapper - A wrapper node for TargetConstantPool,
      /// TargetExternalSymbol, and TargetGlobalAddress.
      Wrapper,

      /// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP
      /// relative displacements.
      WrapperRIP,

      /// MOVDQ2Q - Copies a 64-bit value from the low word of an XMM vector
      /// to an MMX vector. If you think this is too close to the previous
      /// mnemonic, so do I; blame Intel.
      MOVDQ2Q,

      /// MMX_MOVD2W - Copies a 32-bit value from the low word of an MMX
      /// vector to a GPR.
      MMX_MOVD2W,

      /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRB.
      PEXTRB,

      /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRW.
      PEXTRW,

      /// INSERTPS - Insert any element of a 4 x float vector into any element
      /// of a destination 4 x float vector.
      INSERTPS,

      /// PINSRB - Insert the lower 8 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRB.
      PINSRB,

      /// PINSRW - Insert the lower 16 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRW.
      PINSRW, MMX_PINSRW,

      /// PSHUFB - Shuffle 16 8-bit values within a vector.
      PSHUFB,

      /// ANDNP - Bitwise Logical AND NOT of Packed FP values.
      ANDNP,

      /// PSIGN - Copy integer sign.
      PSIGN,

      /// BLENDV - Blend where the selector is a register.
      BLENDV,

      /// BLENDI - Blend where the selector is an immediate.
      BLENDI,

      /// HADD - Integer horizontal add.
      HADD,

      /// HSUB - Integer horizontal sub.
      HSUB,

      /// FHADD - Floating point horizontal add.
      FHADD,

      /// FHSUB - Floating point horizontal sub.
      FHSUB,

      /// FMAX, FMIN - Floating point max and min.
      ///
      FMAX, FMIN,

      /// FMAXC, FMINC - Commutative FMIN and FMAX.
      FMAXC, FMINC,

      /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
      /// approximation. Note that these typically require refinement
      /// in order to obtain suitable precision.
      FRSQRT, FRCP,

      // TLSADDR - Thread Local Storage.
      TLSADDR,

      // TLSBASEADDR - Thread Local Storage. A call to get the start address
      // of the TLS block for the current module.
      TLSBASEADDR,

      // TLSCALL - Thread Local Storage. A call to an OS-provided thunk at
      // the address from an earlier relocation.
      TLSCALL,

      // EH_RETURN - Exception Handling helpers.
      EH_RETURN,

      // EH_SJLJ_SETJMP - SjLj exception handling setjmp.
      EH_SJLJ_SETJMP,

      // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.
      EH_SJLJ_LONGJMP,

      /// TC_RETURN - Tail call return.
      /// operand #0 chain
      /// operand #1 callee (register or absolute)
      /// operand #2 stack adjustment
      /// operand #3 optional in flag
      TC_RETURN,

      // VZEXT_MOVL - Vector move low and zero extend.
      VZEXT_MOVL,

      // VSEXT_MOVL - Vector move low and sign extend.
      VSEXT_MOVL,

      // VZEXT - Vector integer zero-extend.
      VZEXT,

      // VSEXT - Vector integer signed-extend.
      VSEXT,

      // VFPEXT - Vector FP extend.
      VFPEXT,

      // VFPROUND - Vector FP round.
      VFPROUND,

      // VSHLDQ, VSRLDQ - 128-bit vector logical left / right shift
      VSHLDQ, VSRLDQ,

      // VSHL, VSRL, VSRA - Vector shift elements
      VSHL, VSRL, VSRA,

      // VSHLI, VSRLI, VSRAI - Vector shift elements by immediate
      VSHLI, VSRLI, VSRAI,

      // CMPP - Vector packed double/float comparison.
      CMPP,

      // PCMP* - Vector integer comparisons.
      PCMPEQ, PCMPGT,

      // ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
      ADD, SUB, ADC, SBB, SMUL,
      INC, DEC, OR, XOR, AND,

      ANDN, // ANDN - Bitwise AND NOT with FLAGS results.

      BLSI,   // BLSI - Extract lowest set isolated bit
      BLSMSK, // BLSMSK - Get mask up to lowest set bit
      BLSR,   // BLSR - Reset lowest set bit

      UMUL, // LOW, HI, FLAGS = umul LHS, RHS

      // MUL_IMM - X86 specific multiply by immediate.
      MUL_IMM,

      // PTEST - Vector bitwise comparisons
      PTEST,

      // TESTP - Vector packed fp sign bitwise comparisons
      TESTP,

      // Several flavors of instructions with vector shuffle behaviors.
      PALIGN,
      PSHUFD,
      PSHUFHW,
      PSHUFLW,
      SHUFP,
      MOVDDUP,
      MOVSHDUP,
      MOVSLDUP,
      MOVLHPS,
      MOVLHPD,
      MOVHLPS,
      MOVLPS,
      MOVLPD,
      MOVSD,
      MOVSS,
      UNPCKL,
      UNPCKH,
      VPERMILP,
      VPERMV,
      VPERMI,
      VPERM2X128,
      VBROADCAST,

      // PMULUDQ - Vector multiply packed unsigned doubleword integers
      PMULUDQ,

      // FMA nodes
      FMADD,
      FNMADD,
      FMSUB,
      FNMSUB,
      FMADDSUB,
      FMSUBADD,

      // VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
      // according to %al. An operator is needed so that this can be expanded
      // with control flow.
      VASTART_SAVE_XMM_REGS,

      // WIN_ALLOCA - Windows's _chkstk call to do stack probing.
      WIN_ALLOCA,

      // SEG_ALLOCA - For allocating variable amounts of stack space when using
      // segmented stacks. Checks if the current stacklet has enough space, and
      // falls back to heap allocation if not.
      SEG_ALLOCA,

      // WIN_FTOL - Windows's _ftol2 runtime routine to do fptoui.
      WIN_FTOL,

      // Memory barrier
      MEMBARRIER,
      MFENCE,
      SFENCE,
      LFENCE,

      // FNSTSW16r - Store FP status word into i16 register.
      FNSTSW16r,

      // SAHF - Store contents of %ah into %eflags.
      SAHF,

      // RDRAND - Get a random integer and indicate whether it is valid in CF.
      RDRAND,

      // PCMP*STRI
      PCMPISTRI,
      PCMPESTRI,

      // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
      // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
      // Atomic 64-bit binary operations.
      ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
      ATOMSUB64_DAG,
      ATOMOR64_DAG,
      ATOMXOR64_DAG,
      ATOMAND64_DAG,
      ATOMNAND64_DAG,
      ATOMMAX64_DAG,
      ATOMMIN64_DAG,
      ATOMUMAX64_DAG,
      ATOMUMIN64_DAG,
      ATOMSWAP64_DAG,

      // LCMPXCHG_DAG, LCMPXCHG8_DAG, LCMPXCHG16_DAG - Compare and swap.
      LCMPXCHG_DAG,
      LCMPXCHG8_DAG,
      LCMPXCHG16_DAG,

      // VZEXT_LOAD - Load, scalar_to_vector, and zero extend.
      VZEXT_LOAD,

      // FNSTCW16m - Store FP control word into i16 memory.
      FNSTCW16m,

      /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
      /// integer destination in memory and a FP reg source. This corresponds
      /// to the X86::FIST*m instructions and the rounding mode change stuff. It
      /// has two inputs (token chain and address) and two outputs (int value
      /// and token chain).
      FP_TO_INT16_IN_MEM,
      FP_TO_INT32_IN_MEM,
      FP_TO_INT64_IN_MEM,

      /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
      /// integer source in memory and FP reg result. This corresponds to the
      /// X86::FILD*m instructions. It has three inputs (token chain, address,
      /// and source type) and two outputs (FP value and token chain). FILD_FLAG
      /// also produces a flag.
      FILD,
      FILD_FLAG,

      /// FLD - This instruction implements an extending load to FP stack slots.
      /// This corresponds to the X86::FLD32m / X86::FLD64m instructions. It
      /// takes a chain operand, ptr to load from, and a ValueType node
      /// indicating the type to load to.
      FLD,

      /// FST - This instruction implements a truncating store to FP stack
      /// slots. This corresponds to the X86::FST32m / X86::FST64m
      /// instructions. It takes a chain operand, value to store, address, and
      /// a ValueType to store it as.
      FST,

      /// VAARG_64 - This instruction grabs the address of the next argument
      /// from a va_list. (reads and modifies the va_list in memory)
      VAARG_64

      // WARNING: Do not add anything at the end unless you want the node to
      // have a memop! In fact, starting from ATOMADD64_DAG all opcodes will be
      // treated as target memory ops!
    };
  }

  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// isVEXTRACTF128Index - Return true if the specified
    /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
    /// suitable for input to VEXTRACTF128.
    bool isVEXTRACTF128Index(SDNode *N);

    /// isVINSERTF128Index - Return true if the specified
    /// INSERT_SUBVECTOR operand specifies a subvector insert that is
    /// suitable for input to VINSERTF128.
    bool isVINSERTF128Index(SDNode *N);

    /// getExtractVEXTRACTF128Immediate - Return the appropriate
    /// immediate to extract the specified EXTRACT_SUBVECTOR index
    /// with VEXTRACTF128 instructions.
    unsigned getExtractVEXTRACTF128Immediate(SDNode *N);

    /// getInsertVINSERTF128Immediate - Return the appropriate
    /// immediate to insert at the specified INSERT_SUBVECTOR index
    /// with VINSERTF128 instructions.
    unsigned getInsertVINSERTF128Immediate(SDNode *N);

    /// isZeroNode - Returns true if Elt is a constant zero or a floating point
    /// constant +0.0.
    bool isZeroNode(SDValue Elt);

    /// isOffsetSuitableForCodeModel - Returns true if the given offset can
    /// fit into the displacement field of the instruction.
    bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                      bool hasSymbolicDisplacement = true);

    /// isCalleePop - Determines whether the callee is required to pop its
    /// own arguments. Callee pop is necessary to support tail calls.
    bool isCalleePop(CallingConv::ID CallingConv,
                     bool is64Bit, bool IsVarArg, bool TailCallOpt);
  }

  //===--------------------------------------------------------------------===//
  //  X86TargetLowering - X86 Implementation of the TargetLowering interface
  class X86TargetLowering : public TargetLowering {
  public:
    explicit X86TargetLowering(X86TargetMachine &TM);

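    /// getJumpTableEncoding - Return the entry encoding to use for jump
    /// tables in the current function.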
    virtual unsigned getJumpTableEncoding() const;

    virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i8; }

    virtual const MCExpr *
    LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                              const MachineBasicBlock *MBB, unsigned uid,
                              MCContext &Ctx) const;

    /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
    /// jumptable.
    virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                             SelectionDAG &DAG) const;
    virtual const MCExpr *
    getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                 unsigned JTI, MCContext &Ctx) const;

    /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area. For X86, aggregates
    /// that contain SSE vectors are placed at 16-byte boundaries while the
    /// rest are at 4-byte boundaries.
    virtual unsigned getByValTypeAlignment(Type *Ty) const;

    /// getOptimalMemOpType - Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, the destination alignment can satisfy
    /// any constraint. Similarly, if SrcAlign is zero there is no need to
    /// check it against an alignment requirement, probably because the source
    /// does not need to be loaded. If 'IsZeroVal' is true, it is safe to
    /// return a non-scalar-integer type, e.g. an empty string source, a
    /// constant, or a value loaded from memory. 'MemcpyStrSrc' indicates
    /// whether the memcpy source is constant so it does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    virtual EVT
    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                        bool IsZeroVal, bool MemcpyStrSrc,
                        MachineFunction &MF) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
      return true;
    }

    /// LowerOperation - Provide custom lowering hooks for some operations.
    ///
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG) const;

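    /// PerformDAGCombine - This method is invoked by the DAG combiner for
    /// target-specific DAG nodes (and any nodes the target has registered
    /// combines for); it returns a replacement value, or a null SDValue to
    /// leave the node unchanged.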
    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    /// isTypeDesirableForOp - Return true if the target has native support for
    /// the specified value type and it is 'desirable' to use the type for the
    /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
    /// instruction encodings are longer and some i16 instructions are slow.
    virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const;

    /// IsDesirableToPromoteOp - Return true if it is profitable for the DAG
    /// combiner to promote the specified node to a wider type. If true, the
    /// desired promotion type is returned in PVT. e.g. On x86 i16 is legal,
    /// but undesirable since i16 instruction encodings are longer and some
    /// i16 instructions are slow.
    virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const;

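    /// EmitInstrWithCustomInserter - Expand a pseudo instruction that carries
    /// the 'usesCustomInserter' flag into a real instruction sequence,
    /// possibly creating new basic blocks and control flow.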
    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    /// getTargetNodeName - This method returns the name of a target specific
    /// DAG node.
    virtual const char *getTargetNodeName(unsigned Opcode) const;

    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    virtual EVT getSetCCResultType(EVT VT) const;

    /// computeMaskedBitsForTargetNode - Determine which bits of Op are known
    /// to be either zero or one and return them in the KnownZero/KnownOne
    /// bitsets.
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth = 0) const;

    // ComputeNumSignBitsForTargetNode - Determine the number of bits in the
    // operation that are sign bits.
    virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                     unsigned Depth) const;

    virtual bool
    isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;

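    /// ExpandInlineAsm - Replace an inline asm call whose asm string matches
    /// a pattern this target understands (e.g. bswap) with equivalent LLVM IR;
    /// returns true if the call was replaced.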
    virtual bool ExpandInlineAsm(CallInst *CI) const;

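    /// getConstraintType - Given an inline asm constraint string, classify
    /// the kind of constraint it represents for this target.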
    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    virtual const char *LowerXConstraint(EVT ConstraintVT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    /// getRegForInlineAsmConstraint - Given a physical register constraint
    /// (e.g. {edx}), return the register number and the register class for the
    /// register. This should only be used for C_Register constraints. On
    /// error, this returns a register number of 0.
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;

    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate, that is, the target has add instructions which
    /// can add a register and the immediate without having to materialize
    /// the immediate into a register.
    virtual bool isLegalAddImmediate(int64_t Imm) const;

    /// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
    /// register EAX to i16 by referencing its sub-register AX.
    virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
    virtual bool isTruncateFree(EVT VT1, EVT VT2) const;

    /// isZExtFree - Return true if any actual instruction that defines a
    /// value of type Ty1 implicit zero-extends the value to Ty2 in the result
    /// register. This does not necessarily include registers defined in
    /// unknown ways, such as incoming arguments, or copies from unknown
    /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
    /// does not necessarily apply to truncate instructions. e.g. on x86-64,
    /// all instructions that define 32-bit values implicit zero-extend the
    /// result out to 64 bits.
    virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
    virtual bool isZExtFree(EVT VT1, EVT VT2) const;

    /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster
    /// than a pair of mul and add instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true (and FMAs are legal),
    /// otherwise fmuladd is expanded to mul + add.
    virtual bool isFMAFasterThanMulAndAdd(EVT) const { return true; }

    /// isNarrowingProfitable - Return true if it's profitable to narrow
    /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
    /// from i32 to i8 but not from i32 to i16.
    virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

    /// isShuffleMaskLegal - Targets can use this to indicate that they only
    /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
    /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
    /// values are assumed to be legal.
    virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
                                    EVT VT) const;

    /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can use
    /// this to indicate if there is a suitable VECTOR_SHUFFLE that can be used
    /// to replace a VAND with a constant pool entry.
    virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                        EVT VT) const;

    /// ShouldShrinkFPConstant - If true, then instruction selection should
    /// seek to shrink the FP constant of the specified type to a smaller type
    /// in order to save space and / or reduce runtime.
    virtual bool ShouldShrinkFPConstant(EVT VT) const {
      // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
      // expensive than a straight movsd. On the other hand, it's important to
      // shrink long double fp constant since fldt is very slow.
      return !X86ScalarSSEf64 || VT == MVT::f80;
    }

    const X86Subtarget* getSubtarget() const {
      return Subtarget;
    }

    /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
    /// computed in an SSE register, not on the X87 floating point stack.
    bool isScalarFPTypeInSSEReg(EVT VT) const {
      return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
             (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is when SSE1
    }

    /// isTargetFTOL - Return true if the target uses the MSVC _ftol2 routine
    /// for fptoui.
    bool isTargetFTOL() const {
      return Subtarget->isTargetWindows() && !Subtarget->is64Bit();
    }

    /// isIntegerTypeFTOL - Return true if the MSVC _ftol2 routine should be
    /// used for fptoui to the given type.
    bool isIntegerTypeFTOL(EVT VT) const {
      return isTargetFTOL() && VT == MVT::i64;
    }

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                                     const TargetLibraryInfo *libInfo) const;

    /// getStackCookieLocation - Return true if the target stores stack
    /// protector cookies at a fixed offset in some non-standard address
    /// space, and populates the address space and offset as appropriate.
    virtual bool getStackCookieLocation(unsigned &AddressSpace,
                                        unsigned &Offset) const;

    SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
                      SelectionDAG &DAG) const;

  protected:
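    /// findRepresentativeClass - Return the largest legal super-reg register
    /// class of the register class for the specified type, together with its
    /// associated cost.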
| 710 | std::pair<const TargetRegisterClass*, uint8_t> |
| 711 | findRepresentativeClass(EVT VT) const; |
| 712 | |
Chris Lattner | 76ac068 | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 713 | private: |
Evan Cheng | a9467aa | 2006-04-25 20:13:52 +0000 | [diff] [blame] | 714 | /// Subtarget - Keep a pointer to the X86Subtarget around so that we can |
| 715 | /// make the right decision when generating code for different targets. |
| 716 | const X86Subtarget *Subtarget; |
Dan Gohman | eabd647 | 2008-05-14 01:58:56 +0000 | [diff] [blame] | 717 | const X86RegisterInfo *RegInfo; |
Micah Villmow | cdfe20b | 2012-10-08 16:38:25 +0000 | [diff] [blame] | 718 | const DataLayout *TD; |
Evan Cheng | a9467aa | 2006-04-25 20:13:52 +0000 | [diff] [blame] | 719 | |
Michael J. Spencer | 9cafc87 | 2010-10-20 23:40:27 +0000 | [diff] [blame] | 720 | /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87 |
Dale Johannesen | e36c400 | 2007-09-23 14:52:20 +0000 | [diff] [blame] | 721 | /// floating point ops. |
| 722 | /// When SSE is available, use it for f32 operations. |
| 723 | /// When SSE2 is available, use it for f64 operations. |
| 724 | bool X86ScalarSSEf32; |
| 725 | bool X86ScalarSSEf64; |
Evan Cheng | 084a1cd | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 726 | |
Evan Cheng | 16993aa | 2009-10-27 19:56:55 +0000 | [diff] [blame] | 727 | /// LegalFPImmediates - A list of legal fp immediates. |
| 728 | std::vector<APFloat> LegalFPImmediates; |
| 729 | |
| 730 | /// addLegalFPImmediate - Indicate that this x86 target can instruction |
| 731 | /// select the specified FP immediate natively. |
| 732 | void addLegalFPImmediate(const APFloat& Imm) { |
| 733 | LegalFPImmediates.push_back(Imm); |
| 734 | } |
| 735 | |
Dan Gohman | f9bbcd1 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 736 | SDValue LowerCallResult(SDValue Chain, SDValue InFlag, |
Sandeep Patel | 68c5f47 | 2009-09-02 08:44:58 +0000 | [diff] [blame] | 737 | CallingConv::ID CallConv, bool isVarArg, |
Dan Gohman | f9bbcd1 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 738 | const SmallVectorImpl<ISD::InputArg> &Ins, |
| 739 | DebugLoc dl, SelectionDAG &DAG, |
Dan Gohman | 21cea8a | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 740 | SmallVectorImpl<SDValue> &InVals) const; |
Dan Gohman | f9bbcd1 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 741 | SDValue LowerMemArgument(SDValue Chain, |
Sandeep Patel | 68c5f47 | 2009-09-02 08:44:58 +0000 | [diff] [blame] | 742 | CallingConv::ID CallConv, |
Dan Gohman | f9bbcd1 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 743 | const SmallVectorImpl<ISD::InputArg> &ArgInfo, |
| 744 | DebugLoc dl, SelectionDAG &DAG, |
| 745 | const CCValAssign &VA, MachineFrameInfo *MFI, |
Dan Gohman | 21cea8a | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 746 | unsigned i) const; |
Dan Gohman | f9bbcd1 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 747 | SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg, |
| 748 | DebugLoc dl, SelectionDAG &DAG, |
| 749 | const CCValAssign &VA, |
Dan Gohman | 21cea8a | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 750 | ISD::ArgFlagsTy Flags) const; |
Rafael Espindola | e636fc0 | 2007-08-31 15:06:30 +0000 | [diff] [blame] | 751 | |
Gordon Henriksen | 9231958 | 2008-01-05 16:56:59 +0000 | [diff] [blame] | 752 | // Call lowering helpers. |
Evan Cheng | 67a69dd | 2010-01-27 00:07:07 +0000 | [diff] [blame] | 753 | |
| 754 | /// IsEligibleForTailCallOptimization - Check whether the call is eligible |
| 755 | /// for tail call optimization. Targets which want to do tail call |
| 756 | /// optimization should implement this function. |
Evan Cheng | 6f36a08 | 2010-02-02 23:55:14 +0000 | [diff] [blame] | 757 | bool IsEligibleForTailCallOptimization(SDValue Callee, |
Evan Cheng | 67a69dd | 2010-01-27 00:07:07 +0000 | [diff] [blame] | 758 | CallingConv::ID CalleeCC, |
| 759 | bool isVarArg, |
Evan Cheng | ae5edee | 2010-03-15 18:54:48 +0000 | [diff] [blame] | 760 | bool isCalleeStructRet, |
| 761 | bool isCallerStructRet, |
Evan Cheng | 446ff28 | 2012-09-25 05:32:34 +0000 | [diff] [blame] | 762 | Type *RetTy, |
Evan Cheng | 85476f3 | 2010-01-27 06:25:16 +0000 | [diff] [blame] | 763 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
Dan Gohman | fe7532a | 2010-07-07 15:54:55 +0000 | [diff] [blame] | 764 | const SmallVectorImpl<SDValue> &OutVals, |
Evan Cheng | 85476f3 | 2010-01-27 06:25:16 +0000 | [diff] [blame] | 765 | const SmallVectorImpl<ISD::InputArg> &Ins, |
Evan Cheng | 67a69dd | 2010-01-27 00:07:07 +0000 | [diff] [blame] | 766 | SelectionDAG& DAG) const; |
    bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
    SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                    SDValue Chain, bool IsTailCall, bool Is64Bit,
                                    int FPDiff, DebugLoc dl) const;

    unsigned GetAlignedArgumentStackSize(unsigned StackSize,
                                         SelectionDAG &DAG) const;

    std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                               bool isSigned,
                                               bool isReplace) const;

    SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
                                   SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
                               int64_t Offset, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToBT(SDValue And, ISD::CondCode CC,
                      DebugLoc dl, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;

    // Utility functions to help LowerVECTOR_SHUFFLE & LowerBUILD_VECTOR
    SDValue LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const;
    SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const;
    SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerVectorAllZeroTest(SDValue Op, SelectionDAG &DAG) const;

    SDValue lowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;
    virtual SDValue
      LowerCall(CallLoweringInfo &CLI,
                SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;

    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;

    virtual EVT
    getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
                             ISD::NodeType ExtendKind) const;

    virtual bool
    CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                   bool isVarArg,
                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                   LLVMContext &Context) const;

    /// Utility function to emit atomic-load-arith operations (and, or, xor,
    /// nand, max, min, umax, umin). It takes the corresponding machine
    /// instruction to expand and the machine basic block it belongs to.
    MachineBasicBlock *EmitAtomicLoadArith(MachineInstr *MI,
                                           MachineBasicBlock *MBB) const;
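    // Rough shape of the expansion (a hedged sketch, not the exact emitted
    // MI sequence): a pseudo such as ATOMAND32, created for
    //   atomicrmw and i32* %p, i32 %v seq_cst
    // becomes a loop that loads the old value, applies the AND, and retries
    // with LOCK CMPXCHG until the exchange succeeds.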

    /// Utility function to emit atomic-load-arith operations (and, or, xor,
    /// nand, add, sub, swap) for 64-bit operands on a 32-bit target.
    MachineBasicBlock *EmitAtomicLoadArith6432(MachineInstr *MI,
                                               MachineBasicBlock *MBB) const;
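    // Hedged illustration: on i386 a 64-bit atomicrmw cannot use a single
    // locked RMW instruction, so the pseudo is expanded into a LOCK CMPXCHG8B
    // retry loop operating on the EDX:EAX (old) and ECX:EBX (new) pairs.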

    /// Utility function to emit the low-level va_arg code for X86-64.
    MachineBasicBlock *EmitVAARG64WithCustomInserter(
                       MachineInstr *MI,
                       MachineBasicBlock *MBB) const;
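    // Hedged sketch of the inserted control flow (per the SysV x86-64 ABI
    // va_list layout: gp_offset, fp_offset, overflow_arg_area,
    // reg_save_area): if the relevant offset field is still within the
    // register save area, load the argument from reg_save_area and bump the
    // offset; otherwise load it from overflow_arg_area and advance that
    // pointer.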

    /// Utility function to emit the xmm reg save portion of va_start.
    MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
                                                   MachineInstr *BInstr,
                                                   MachineBasicBlock *BB) const;
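    // Hedged note: in a vararg function the caller passes the number of
    // vector registers used in %al, so the emitted block tests AL and, when
    // it is non-zero, stores XMM0-XMM7 into the register save area.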

    MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
                                         MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB,
                                            bool Is64Bit) const;

    MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *emitLoweredTLSAddr(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr *MI,
                                        MachineBasicBlock *MBB) const;

    MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
                                         MachineBasicBlock *MBB) const;

    /// Emit nodes that will be selected as "test Op0,Op0", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;
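    // Hedged example: for IR like 'if ((x & 1) != 0)', the EFLAGS result of
    // the AND can often be reused so no separate TEST is emitted; otherwise
    // this falls back to an X86ISD::CMP of Op0 against zero.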

    /// Emit nodes that will be selected as "cmp Op0,Op1", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                    SelectionDAG &DAG) const;
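    // Hedged note: when Op1 is the constant zero this typically defers to
    // EmitTest above; otherwise it builds an X86ISD::CMP of the two operands.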

    /// Convert a comparison if required by the subtarget.
    SDValue ConvertCmpIfNecessary(SDValue Cmp, SelectionDAG &DAG) const;
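    // Hedged illustration: the classic case is an x87 floating-point compare,
    // whose result sits in the FPU status word and must be moved into EFLAGS
    // (e.g. via FNSTSW plus SAHF or an equivalent sequence) before the
    // condition code can be consumed.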
  };

  namespace X86 {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  }
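  // Hedged usage note: X86TargetLowering's createFastISel override is
  // expected to forward here, roughly
  //   return X86::createFastISel(funcInfo, libInfo);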

  class X86ScalarTargetTransformImpl : public ScalarTargetTransformImpl {
  public:
    explicit X86ScalarTargetTransformImpl(const TargetLowering *TL) :
      ScalarTargetTransformImpl(TL) {}

    virtual PopcntHwSupport getPopcntHwSupport(unsigned TyWidth) const;
  };

  class X86VectorTargetTransformInfo : public VectorTargetTransformImpl {
  public:
    explicit X86VectorTargetTransformInfo(const TargetLowering *TL) :
      VectorTargetTransformImpl(TL) {}

    virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty) const;

    virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                                        unsigned Index) const;

    virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                        Type *CondTy) const;

    virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                                      Type *Src) const;
  };
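  // Hedged usage sketch: cost-model clients such as the loop vectorizer query
  // these hooks, e.g. asking getArithmeticInstrCost(Instruction::Mul, VecTy)
  // to decide whether vectorizing a multiply at a given width pays off on x86.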
}  // end namespace llvm

#endif // X86ISELLOWERING_H