Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 1 | //===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===// |
| 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
Chris Lattner | 4ee451d | 2007-12-29 20:36:04 +0000 | [diff] [blame] | 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | // |
| 10 | // This file defines the interfaces that X86 uses to lower LLVM code into a |
| 11 | // selection DAG. |
| 12 | // |
| 13 | //===----------------------------------------------------------------------===// |
| 14 | |
| 15 | #ifndef X86ISELLOWERING_H |
| 16 | #define X86ISELLOWERING_H |
| 17 | |
Evan Cheng | 559806f | 2006-01-27 08:10:46 +0000 | [diff] [blame] | 18 | #include "X86Subtarget.h" |
Anton Korobeynikov | 2365f51 | 2007-07-14 14:06:15 +0000 | [diff] [blame] | 19 | #include "X86RegisterInfo.h" |
Gordon Henriksen | 8673766 | 2008-01-05 16:56:59 +0000 | [diff] [blame] | 20 | #include "X86MachineFunctionInfo.h" |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 21 | #include "llvm/Target/TargetLowering.h" |
Evan Cheng | c3f44b0 | 2008-09-03 00:03:49 +0000 | [diff] [blame] | 22 | #include "llvm/CodeGen/FastIsel.h" |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 23 | #include "llvm/CodeGen/SelectionDAG.h" |
Rafael Espindola | 1b5dcc3 | 2007-08-31 15:06:30 +0000 | [diff] [blame] | 24 | #include "llvm/CodeGen/CallingConvLower.h" |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 25 | |
| 26 | namespace llvm { |
namespace X86ISD {
  // X86 Specific DAG Nodes.
  enum NodeType {
    // Start the numbering where the builtin ops leave off.
    FIRST_NUMBER = ISD::BUILTIN_OP_END+X86::INSTRUCTION_LIST_END,

    /// BSF - Bit scan forward.
    /// BSR - Bit scan reverse.
    BSF,
    BSR,

    /// SHLD, SHRD - Double shift instructions. These correspond to
    /// X86::SHLDxx and X86::SHRDxx instructions.
    SHLD,
    SHRD,

    /// FAND - Bitwise logical AND of floating point values. This corresponds
    /// to X86::ANDPS or X86::ANDPD.
    FAND,

    /// FOR - Bitwise logical OR of floating point values. This corresponds
    /// to X86::ORPS or X86::ORPD.
    FOR,

    /// FXOR - Bitwise logical XOR of floating point values. This corresponds
    /// to X86::XORPS or X86::XORPD.
    FXOR,

    /// FSRL - Bitwise logical right shift of floating point values. This
    /// corresponds to X86::PSRLDQ.
    FSRL,

    /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
    /// integer source in memory and FP reg result. This corresponds to the
    /// X86::FILD*m instructions. It has three inputs (token chain, address,
    /// and source type) and two outputs (FP value and token chain). FILD_FLAG
    /// also produces a flag.
    FILD,
    FILD_FLAG,

    /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
    /// integer destination in memory and a FP reg source. This corresponds
    /// to the X86::FIST*m instructions and the rounding mode change stuff. It
    /// has two inputs (token chain and address) and two outputs (int value
    /// and token chain).
    FP_TO_INT16_IN_MEM,
    FP_TO_INT32_IN_MEM,
    FP_TO_INT64_IN_MEM,

    /// FLD - This instruction implements an extending load to FP stack slots.
    /// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain
    /// operand, ptr to load from, and a ValueType node indicating the type
    /// to load to.
    FLD,

    /// FST - This instruction implements a truncating store to FP stack
    /// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
    /// chain operand, value to store, address, and a ValueType to store it
    /// as.
    FST,

    /// CALL/TAILCALL - These operations represent an abstract X86 call
    /// instruction, which includes a bunch of information. In particular the
    /// operands of these node are:
    ///
    ///     #0 - The incoming token chain
    ///     #1 - The callee
    ///     #2 - The number of arg bytes the caller pushes on the stack.
    ///     #3 - The number of arg bytes the callee pops off the stack.
    ///     #4 - The value to pass in AL/AX/EAX (optional)
    ///     #5 - The value to pass in DL/DX/EDX (optional)
    ///
    /// The result values of these nodes are:
    ///
    ///     #0 - The outgoing token chain
    ///     #1 - The first register result value (optional)
    ///     #2 - The second register result value (optional)
    ///
    /// The CALL vs TAILCALL distinction boils down to whether the callee is
    /// known not to modify the caller's stack frame, as is standard with
    /// LLVM.
    CALL,
    TAILCALL,

    /// RDTSC_DAG - This operation implements the lowering for
    /// readcyclecounter
    RDTSC_DAG,

    /// X86 compare and logical compare instructions.
    CMP, COMI, UCOMI,

    /// X86 SetCC. Operand 1 is condition code, and operand 2 is the flag
    /// operand produced by a CMP instruction.
    SETCC,

    /// X86 conditional moves. Operand 1 and operand 2 are the two values
    /// to select from (operand 1 is a R/W operand). Operand 3 is the
    /// condition code, and operand 4 is the flag operand produced by a CMP
    /// or TEST instruction. It also writes a flag result.
    CMOV,

    /// X86 conditional branches. Operand 1 is the chain operand, operand 2
    /// is the block to branch if condition is true, operand 3 is the
    /// condition code, and operand 4 is the flag operand produced by a CMP
    /// or TEST instruction.
    BRCOND,

    /// Return with a flag operand. Operand 1 is the chain operand, operand
    /// 2 is the number of bytes of stack to pop.
    RET_FLAG,

    /// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx.
    REP_STOS,

    /// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.
    REP_MOVS,

    /// GlobalBaseReg - On Darwin, this node represents the result of the popl
    /// at function entry, used for PIC code.
    GlobalBaseReg,

    /// Wrapper - A wrapper node for TargetConstantPool,
    /// TargetExternalSymbol, and TargetGlobalAddress.
    Wrapper,

    /// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP
    /// relative displacements.
    WrapperRIP,

    /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
    /// i32, corresponds to X86::PEXTRB.
    PEXTRB,

    /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
    /// i32, corresponds to X86::PEXTRW.
    PEXTRW,

    /// INSERTPS - Insert any element of a 4 x float vector into any element
    /// of a destination 4 x float vector.
    INSERTPS,

    /// PINSRB - Insert the lower 8-bits of a 32-bit value to a vector,
    /// corresponds to X86::PINSRB.
    PINSRB,

    /// PINSRW - Insert the lower 16-bits of a 32-bit value to a vector,
    /// corresponds to X86::PINSRW.
    PINSRW,

    /// FMAX, FMIN - Floating point max and min.
    ///
    FMAX, FMIN,

    /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
    /// approximation. Note that these typically require refinement
    /// in order to obtain suitable precision.
    FRSQRT, FRCP,

    // TLSADDR, THREAD_POINTER - Thread Local Storage.
    TLSADDR, THREAD_POINTER,

    // EH_RETURN - Exception Handling helpers.
    EH_RETURN,

    /// TC_RETURN - Tail call return.
    ///   operand #0 chain
    ///   operand #1 callee (register or absolute)
    ///   operand #2 stack adjustment
    ///   operand #3 optional in flag
    TC_RETURN,

    // LCMPXCHG_DAG, LCMPXCHG8_DAG - Compare and swap.
    LCMPXCHG_DAG,
    LCMPXCHG8_DAG,

    // FNSTCW16m - Store FP control word into i16 memory.
    FNSTCW16m,

    // VZEXT_MOVL - Vector move low and zero extend.
    VZEXT_MOVL,

    // VZEXT_LOAD - Load, scalar_to_vector, and zero extend.
    VZEXT_LOAD,

    // VSHL, VSRL - Vector logical left / right shift.
    VSHL, VSRL,

    // CMPPD, CMPPS - Vector double/float comparison.
    CMPPD, CMPPS,

    // PCMP* - Vector integer comparisons.
    PCMPEQB, PCMPEQW, PCMPEQD, PCMPEQQ,
    PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ
  };
}
| 222 | |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 223 | /// Define some predicates that are used for node matching. |
/// Define some predicates that are used for node matching.
namespace X86 {
  /// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
  /// specifies a shuffle of elements that is suitable for input to PSHUFD.
  bool isPSHUFDMask(SDNode *N);

  /// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
  /// specifies a shuffle of elements that is suitable for input to PSHUFHW.
  bool isPSHUFHWMask(SDNode *N);

  /// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
  /// specifies a shuffle of elements that is suitable for input to PSHUFLW.
  bool isPSHUFLWMask(SDNode *N);

  /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
  /// specifies a shuffle of elements that is suitable for input to SHUFP*.
  bool isSHUFPMask(SDNode *N);

  /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
  /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
  bool isMOVHLPSMask(SDNode *N);

  /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
  /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
  /// <2, 3, 2, 3>
  bool isMOVHLPS_v_undef_Mask(SDNode *N);

  /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
  /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
  bool isMOVLPMask(SDNode *N);

  /// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
  /// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
  /// as well as MOVLHPS.
  bool isMOVHPMask(SDNode *N);

  /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
  /// specifies a shuffle of elements that is suitable for input to UNPCKL.
  bool isUNPCKLMask(SDNode *N, bool V2IsSplat = false);

  /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
  /// specifies a shuffle of elements that is suitable for input to UNPCKH.
  bool isUNPCKHMask(SDNode *N, bool V2IsSplat = false);

  /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
  /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
  /// <0, 0, 1, 1>
  bool isUNPCKL_v_undef_Mask(SDNode *N);

  /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
  /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
  /// <2, 2, 3, 3>
  bool isUNPCKH_v_undef_Mask(SDNode *N);

  /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
  /// specifies a shuffle of elements that is suitable for input to MOVSS,
  /// MOVSD, and MOVD, i.e. setting the lowest element.
  bool isMOVLMask(SDNode *N);

  /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
  /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
  bool isMOVSHDUPMask(SDNode *N);

  /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
  /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
  bool isMOVSLDUPMask(SDNode *N);

  /// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand
  /// specifies a splat of a single element.
  bool isSplatMask(SDNode *N);

  /// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
  /// specifies a splat of zero element.
  bool isSplatLoMask(SDNode *N);

  /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
  /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
  /// instructions.
  unsigned getShuffleSHUFImmediate(SDNode *N);

  /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
  /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
  /// instructions.
  unsigned getShufflePSHUFHWImmediate(SDNode *N);

  /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
  /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
  /// instructions.
  unsigned getShufflePSHUFLWImmediate(SDNode *N);
}
| 313 | |
Chris Lattner | 9189777 | 2006-10-18 18:26:48 +0000 | [diff] [blame] | 314 | //===--------------------------------------------------------------------===// |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 315 | // X86TargetLowering - X86 Implementation of the TargetLowering interface |
| 316 | class X86TargetLowering : public TargetLowering { |
| 317 | int VarArgsFrameIndex; // FrameIndex for start of varargs area. |
Evan Cheng | 25ab690 | 2006-09-08 06:48:29 +0000 | [diff] [blame] | 318 | int RegSaveFrameIndex; // X86-64 vararg func register save area. |
| 319 | unsigned VarArgsGPOffset; // X86-64 vararg func int reg offset. |
| 320 | unsigned VarArgsFPOffset; // X86-64 vararg func fp reg offset. |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 321 | int BytesToPopOnReturn; // Number of arg bytes ret should pop. |
| 322 | int BytesCallerReserves; // Number of arg bytes caller makes. |
Arnold Schwaighofer | c85e171 | 2007-10-11 19:40:01 +0000 | [diff] [blame] | 323 | |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 324 | public: |
Dan Gohman | c9f5f3f | 2008-05-14 01:58:56 +0000 | [diff] [blame] | 325 | explicit X86TargetLowering(X86TargetMachine &TM); |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 326 | |
Evan Cheng | cc41586 | 2007-11-09 01:32:10 +0000 | [diff] [blame] | 327 | /// getPICJumpTableRelocaBase - Returns relocation base for the given PIC |
| 328 | /// jumptable. |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 329 | SDValue getPICJumpTableRelocBase(SDValue Table, |
Evan Cheng | cc41586 | 2007-11-09 01:32:10 +0000 | [diff] [blame] | 330 | SelectionDAG &DAG) const; |
| 331 | |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 332 | // Return the number of bytes that a function should pop when it returns (in |
| 333 | // addition to the space used by the return address). |
| 334 | // |
| 335 | unsigned getBytesToPopOnReturn() const { return BytesToPopOnReturn; } |
| 336 | |
| 337 | // Return the number of bytes that the caller reserves for arguments passed |
| 338 | // to this function. |
| 339 | unsigned getBytesCallerReserves() const { return BytesCallerReserves; } |
| 340 | |
Chris Lattner | 54e3efd | 2007-02-26 04:01:25 +0000 | [diff] [blame] | 341 | /// getStackPtrReg - Return the stack pointer register we are using: either |
| 342 | /// ESP or RSP. |
| 343 | unsigned getStackPtrReg() const { return X86StackPtr; } |
Evan Cheng | 2928650 | 2008-01-23 23:17:41 +0000 | [diff] [blame] | 344 | |
| 345 | /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate |
| 346 | /// function arguments in the caller parameter area. For X86, aggregates |
| 347 | /// that contains are placed at 16-byte boundaries while the rest are at |
| 348 | /// 4-byte boundaries. |
| 349 | virtual unsigned getByValTypeAlignment(const Type *Ty) const; |
Evan Cheng | f0df031 | 2008-05-15 08:39:06 +0000 | [diff] [blame] | 350 | |
| 351 | /// getOptimalMemOpType - Returns the target specific optimal type for load |
Evan Cheng | 0ef8de3 | 2008-05-15 22:13:02 +0000 | [diff] [blame] | 352 | /// and store operations as a result of memset, memcpy, and memmove |
| 353 | /// lowering. It returns MVT::iAny if SelectionDAG should be responsible for |
Evan Cheng | f0df031 | 2008-05-15 08:39:06 +0000 | [diff] [blame] | 354 | /// determining it. |
| 355 | virtual |
Duncan Sands | 83ec4b6 | 2008-06-06 12:08:01 +0000 | [diff] [blame] | 356 | MVT getOptimalMemOpType(uint64_t Size, unsigned Align, |
| 357 | bool isSrcConst, bool isSrcStr) const; |
Chris Lattner | 54e3efd | 2007-02-26 04:01:25 +0000 | [diff] [blame] | 358 | |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 359 | /// LowerOperation - Provide custom lowering hooks for some operations. |
| 360 | /// |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 361 | virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG); |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 362 | |
Duncan Sands | 126d907 | 2008-07-04 11:47:58 +0000 | [diff] [blame] | 363 | /// ReplaceNodeResults - Replace a node with an illegal result type |
| 364 | /// with a new node built out of custom code. |
Chris Lattner | 27a6c73 | 2007-11-24 07:07:01 +0000 | [diff] [blame] | 365 | /// |
Duncan Sands | 126d907 | 2008-07-04 11:47:58 +0000 | [diff] [blame] | 366 | virtual SDNode *ReplaceNodeResults(SDNode *N, SelectionDAG &DAG); |
Chris Lattner | 27a6c73 | 2007-11-24 07:07:01 +0000 | [diff] [blame] | 367 | |
| 368 | |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 369 | virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; |
Evan Cheng | 206ee9d | 2006-07-07 08:33:52 +0000 | [diff] [blame] | 370 | |
Evan Cheng | ff9b373 | 2008-01-30 18:18:23 +0000 | [diff] [blame] | 371 | virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI, |
| 372 | MachineBasicBlock *MBB); |
Evan Cheng | 4a46080 | 2006-01-11 00:33:36 +0000 | [diff] [blame] | 373 | |
Mon P Wang | 63307c3 | 2008-05-05 19:05:59 +0000 | [diff] [blame] | 374 | |
Evan Cheng | 7226158 | 2005-12-20 06:22:03 +0000 | [diff] [blame] | 375 | /// getTargetNodeName - This method returns the name of a target specific |
| 376 | /// DAG node. |
| 377 | virtual const char *getTargetNodeName(unsigned Opcode) const; |
| 378 | |
Scott Michel | 5b8f82e | 2008-03-10 15:42:14 +0000 | [diff] [blame] | 379 | /// getSetCCResultType - Return the ISD::SETCC ValueType |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 380 | virtual MVT getSetCCResultType(const SDValue &) const; |
Scott Michel | 5b8f82e | 2008-03-10 15:42:14 +0000 | [diff] [blame] | 381 | |
Nate Begeman | 368e18d | 2006-02-16 21:11:51 +0000 | [diff] [blame] | 382 | /// computeMaskedBitsForTargetNode - Determine which of the bits specified |
| 383 | /// in Mask are known to be either zero or one and return them in the |
| 384 | /// KnownZero/KnownOne bitsets. |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 385 | virtual void computeMaskedBitsForTargetNode(const SDValue Op, |
Dan Gohman | 977a76f | 2008-02-13 22:28:48 +0000 | [diff] [blame] | 386 | const APInt &Mask, |
Dan Gohman | fd29e0e | 2008-02-13 00:35:47 +0000 | [diff] [blame] | 387 | APInt &KnownZero, |
| 388 | APInt &KnownOne, |
Dan Gohman | ea859be | 2007-06-22 14:59:07 +0000 | [diff] [blame] | 389 | const SelectionDAG &DAG, |
Nate Begeman | 368e18d | 2006-02-16 21:11:51 +0000 | [diff] [blame] | 390 | unsigned Depth = 0) const; |
Evan Cheng | ad4196b | 2008-05-12 19:56:52 +0000 | [diff] [blame] | 391 | |
| 392 | virtual bool |
| 393 | isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) const; |
Nate Begeman | 368e18d | 2006-02-16 21:11:51 +0000 | [diff] [blame] | 394 | |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 395 | SDValue getReturnAddressFrameIndex(SelectionDAG &DAG); |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 396 | |
Chris Lattner | 4234f57 | 2007-03-25 02:14:49 +0000 | [diff] [blame] | 397 | ConstraintType getConstraintType(const std::string &Constraint) const; |
Chris Lattner | f4dff84 | 2006-07-11 02:54:03 +0000 | [diff] [blame] | 398 | |
Chris Lattner | 259e97c | 2006-01-31 19:43:35 +0000 | [diff] [blame] | 399 | std::vector<unsigned> |
Chris Lattner | 1efa40f | 2006-02-22 00:56:39 +0000 | [diff] [blame] | 400 | getRegClassForInlineAsmConstraint(const std::string &Constraint, |
Duncan Sands | 83ec4b6 | 2008-06-06 12:08:01 +0000 | [diff] [blame] | 401 | MVT VT) const; |
Chris Lattner | 48884cd | 2007-08-25 00:47:38 +0000 | [diff] [blame] | 402 | |
Duncan Sands | 83ec4b6 | 2008-06-06 12:08:01 +0000 | [diff] [blame] | 403 | virtual const char *LowerXConstraint(MVT ConstraintVT) const; |
Dale Johannesen | ba2a0b9 | 2008-01-29 02:21:21 +0000 | [diff] [blame] | 404 | |
Chris Lattner | 48884cd | 2007-08-25 00:47:38 +0000 | [diff] [blame] | 405 | /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops |
| 406 | /// vector. If it is invalid, don't add anything to Ops. |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 407 | virtual void LowerAsmOperandForConstraint(SDValue Op, |
Chris Lattner | 48884cd | 2007-08-25 00:47:38 +0000 | [diff] [blame] | 408 | char ConstraintLetter, |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 409 | std::vector<SDValue> &Ops, |
Chris Lattner | 5e76423 | 2008-04-26 23:02:14 +0000 | [diff] [blame] | 410 | SelectionDAG &DAG) const; |
Chris Lattner | 22aaf1d | 2006-10-31 20:13:11 +0000 | [diff] [blame] | 411 | |
Chris Lattner | 9189777 | 2006-10-18 18:26:48 +0000 | [diff] [blame] | 412 | /// getRegForInlineAsmConstraint - Given a physical register constraint |
| 413 | /// (e.g. {edx}), return the register number and the register class for the |
| 414 | /// register. This should only be used for C_Register constraints. On |
| 415 | /// error, this returns a register number of 0. |
Chris Lattner | f76d180 | 2006-07-31 23:26:50 +0000 | [diff] [blame] | 416 | std::pair<unsigned, const TargetRegisterClass*> |
| 417 | getRegForInlineAsmConstraint(const std::string &Constraint, |
Duncan Sands | 83ec4b6 | 2008-06-06 12:08:01 +0000 | [diff] [blame] | 418 | MVT VT) const; |
Chris Lattner | f76d180 | 2006-07-31 23:26:50 +0000 | [diff] [blame] | 419 | |
Chris Lattner | c9addb7 | 2007-03-30 23:15:24 +0000 | [diff] [blame] | 420 | /// isLegalAddressingMode - Return true if the addressing mode represented |
| 421 | /// by AM is legal for this target, for a load/store of the specified type. |
| 422 | virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const; |
| 423 | |
Evan Cheng | 2bd122c | 2007-10-26 01:56:11 +0000 | [diff] [blame] | 424 | /// isTruncateFree - Return true if it's free to truncate a value of |
| 425 | /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in |
| 426 | /// register EAX to i16 by referencing its sub-register AX. |
| 427 | virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const; |
Duncan Sands | 83ec4b6 | 2008-06-06 12:08:01 +0000 | [diff] [blame] | 428 | virtual bool isTruncateFree(MVT VT1, MVT VT2) const; |
Evan Cheng | 2bd122c | 2007-10-26 01:56:11 +0000 | [diff] [blame] | 429 | |
Evan Cheng | 0188ecb | 2006-03-22 18:59:22 +0000 | [diff] [blame] | 430 | /// isShuffleMaskLegal - Targets can use this to indicate that they only |
| 431 | /// support *some* VECTOR_SHUFFLE operations, those with specific masks. |
Chris Lattner | 9189777 | 2006-10-18 18:26:48 +0000 | [diff] [blame] | 432 | /// By default, if a target supports the VECTOR_SHUFFLE node, all mask |
| 433 | /// values are assumed to be legal. |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 434 | virtual bool isShuffleMaskLegal(SDValue Mask, MVT VT) const; |
Evan Cheng | 39623da | 2006-04-20 08:58:49 +0000 | [diff] [blame] | 435 | |
    /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal.  Targets can
    /// use this to indicate whether there is a suitable VECTOR_SHUFFLE that
    /// can be used to replace a VAND with a constant pool entry.
    virtual bool isVectorClearMaskLegal(const std::vector<SDValue> &BVOps,
                                        MVT EVT, SelectionDAG &DAG) const;
Evan Cheng | 6fd599f | 2008-03-05 01:30:59 +0000 | [diff] [blame] | 442 | |
| 443 | /// ShouldShrinkFPConstant - If true, then instruction selection should |
| 444 | /// seek to shrink the FP constant of the specified type to a smaller type |
| 445 | /// in order to save space and / or reduce runtime. |
Duncan Sands | 83ec4b6 | 2008-06-06 12:08:01 +0000 | [diff] [blame] | 446 | virtual bool ShouldShrinkFPConstant(MVT VT) const { |
Evan Cheng | 6fd599f | 2008-03-05 01:30:59 +0000 | [diff] [blame] | 447 | // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more |
| 448 | // expensive than a straight movsd. On the other hand, it's important to |
| 449 | // shrink long double fp constant since fldt is very slow. |
| 450 | return !X86ScalarSSEf64 || VT == MVT::f80; |
| 451 | } |
Arnold Schwaighofer | c85e171 | 2007-10-11 19:40:01 +0000 | [diff] [blame] | 452 | |
    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    virtual bool IsEligibleForTailCallOptimization(SDValue Call,
                                                   SDValue Ret,
                                                   SelectionDAG &DAG) const;
| 459 | |
Dan Gohman | 707e018 | 2008-04-12 04:36:06 +0000 | [diff] [blame] | 460 | virtual const X86Subtarget* getSubtarget() { |
| 461 | return Subtarget; |
Rafael Espindola | f1ba1ca | 2007-11-05 23:12:20 +0000 | [diff] [blame] | 462 | } |
| 463 | |
Chris Lattner | 3d66185 | 2008-01-18 06:52:41 +0000 | [diff] [blame] | 464 | /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is |
| 465 | /// computed in an SSE register, not on the X87 floating point stack. |
Duncan Sands | 83ec4b6 | 2008-06-06 12:08:01 +0000 | [diff] [blame] | 466 | bool isScalarFPTypeInSSEReg(MVT VT) const { |
Chris Lattner | 3d66185 | 2008-01-18 06:52:41 +0000 | [diff] [blame] | 467 | return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2 |
| 468 | (VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1 |
| 469 | } |
Dan Gohman | d9f3c48 | 2008-08-19 21:32:53 +0000 | [diff] [blame] | 470 | |
    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(MachineFunction &mf);
Chris Lattner | 3d66185 | 2008-01-18 06:52:41 +0000 | [diff] [blame] | 474 | |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 475 | private: |
Evan Cheng | 0db9fe6 | 2006-04-25 20:13:52 +0000 | [diff] [blame] | 476 | /// Subtarget - Keep a pointer to the X86Subtarget around so that we can |
| 477 | /// make the right decision when generating code for different targets. |
| 478 | const X86Subtarget *Subtarget; |
Dan Gohman | c9f5f3f | 2008-05-14 01:58:56 +0000 | [diff] [blame] | 479 | const X86RegisterInfo *RegInfo; |
Evan Cheng | 0db9fe6 | 2006-04-25 20:13:52 +0000 | [diff] [blame] | 480 | |
Evan Cheng | 25ab690 | 2006-09-08 06:48:29 +0000 | [diff] [blame] | 481 | /// X86StackPtr - X86 physical register used as stack ptr. |
| 482 | unsigned X86StackPtr; |
Arnold Schwaighofer | c85e171 | 2007-10-11 19:40:01 +0000 | [diff] [blame] | 483 | |
Dale Johannesen | f1fc3a8 | 2007-09-23 14:52:20 +0000 | [diff] [blame] | 484 | /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87 |
| 485 | /// floating point ops. |
| 486 | /// When SSE is available, use it for f32 operations. |
| 487 | /// When SSE2 is available, use it for f64 operations. |
| 488 | bool X86ScalarSSEf32; |
| 489 | bool X86ScalarSSEf64; |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 490 | |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 491 | SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, SDNode*TheCall, |
Chris Lattner | 3085e15 | 2007-02-25 08:59:22 +0000 | [diff] [blame] | 492 | unsigned CallingConv, SelectionDAG &DAG); |
Evan Cheng | 0d9e976 | 2008-01-29 19:34:22 +0000 | [diff] [blame] | 493 | |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 494 | SDValue LowerMemArgument(SDValue Op, SelectionDAG &DAG, |
Rafael Espindola | 7effac5 | 2007-09-14 15:48:13 +0000 | [diff] [blame] | 495 | const CCValAssign &VA, MachineFrameInfo *MFI, |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 496 | unsigned CC, SDValue Root, unsigned i); |
Rafael Espindola | 7effac5 | 2007-09-14 15:48:13 +0000 | [diff] [blame] | 497 | |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 498 | SDValue LowerMemOpCallTo(SDValue Op, SelectionDAG &DAG, |
| 499 | const SDValue &StackPtr, |
| 500 | const CCValAssign &VA, SDValue Chain, |
| 501 | SDValue Arg); |
Rafael Espindola | 1b5dcc3 | 2007-08-31 15:06:30 +0000 | [diff] [blame] | 502 | |
Gordon Henriksen | 8673766 | 2008-01-05 16:56:59 +0000 | [diff] [blame] | 503 | // Call lowering helpers. |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 504 | bool IsCalleePop(SDValue Op); |
Arnold Schwaighofer | 258bb1b | 2008-02-26 22:21:54 +0000 | [diff] [blame] | 505 | bool CallRequiresGOTPtrInReg(bool Is64Bit, bool IsTailCall); |
| 506 | bool CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall); |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 507 | SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr, |
| 508 | SDValue Chain, bool IsTailCall, bool Is64Bit, |
Arnold Schwaighofer | 4b5324a | 2008-04-12 18:11:06 +0000 | [diff] [blame] | 509 | int FPDiff); |
Arnold Schwaighofer | 4b5324a | 2008-04-12 18:11:06 +0000 | [diff] [blame] | 510 | |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 511 | CCAssignFn *CCAssignFnForNode(SDValue Op) const; |
| 512 | NameDecorationStyle NameDecorationForFORMAL_ARGUMENTS(SDValue Op); |
Arnold Schwaighofer | c85e171 | 2007-10-11 19:40:01 +0000 | [diff] [blame] | 513 | unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG &DAG); |
Evan Cheng | 559806f | 2006-01-27 08:10:46 +0000 | [diff] [blame] | 514 | |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 515 | std::pair<SDValue,SDValue> FP_TO_SINTHelper(SDValue Op, |
Chris Lattner | 27a6c73 | 2007-11-24 07:07:01 +0000 | [diff] [blame] | 516 | SelectionDAG &DAG); |
| 517 | |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 518 | SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG); |
| 519 | SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG); |
| 520 | SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG); |
| 521 | SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG); |
| 522 | SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG); |
| 523 | SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG); |
| 524 | SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG); |
| 525 | SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG); |
| 526 | SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG); |
| 527 | SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG); |
| 528 | SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG); |
| 529 | SDValue LowerShift(SDValue Op, SelectionDAG &DAG); |
| 530 | SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG); |
| 531 | SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG); |
| 532 | SDValue LowerFABS(SDValue Op, SelectionDAG &DAG); |
| 533 | SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG); |
| 534 | SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG); |
| 535 | SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG); |
| 536 | SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG); |
| 537 | SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG); |
| 538 | SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG); |
| 539 | SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG); |
| 540 | SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG); |
| 541 | SDValue LowerCALL(SDValue Op, SelectionDAG &DAG); |
| 542 | SDValue LowerRET(SDValue Op, SelectionDAG &DAG); |
| 543 | SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG); |
| 544 | SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG); |
| 545 | SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG); |
| 546 | SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG); |
| 547 | SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG); |
| 548 | SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG); |
| 549 | SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG); |
| 550 | SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG); |
| 551 | SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG); |
| 552 | SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG); |
| 553 | SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG); |
| 554 | SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG); |
| 555 | SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG); |
| 556 | SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG); |
| 557 | SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG); |
Chris Lattner | 27a6c73 | 2007-11-24 07:07:01 +0000 | [diff] [blame] | 558 | SDNode *ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG); |
| 559 | SDNode *ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG); |
Mon P Wang | 2887310 | 2008-06-25 08:15:39 +0000 | [diff] [blame] | 560 | SDNode *ExpandATOMIC_CMP_SWAP(SDNode *N, SelectionDAG &DAG); |
| 561 | SDNode *ExpandATOMIC_LOAD_SUB(SDNode *N, SelectionDAG &DAG); |
Mon P Wang | 63307c3 | 2008-05-05 19:05:59 +0000 | [diff] [blame] | 562 | |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 563 | SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, |
| 564 | SDValue Chain, |
| 565 | SDValue Dst, SDValue Src, |
| 566 | SDValue Size, unsigned Align, |
Dan Gohman | 1f13c68 | 2008-04-28 17:15:20 +0000 | [diff] [blame] | 567 | const Value *DstSV, uint64_t DstSVOff); |
Dan Gohman | 475871a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 568 | SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, |
| 569 | SDValue Chain, |
| 570 | SDValue Dst, SDValue Src, |
| 571 | SDValue Size, unsigned Align, |
Dan Gohman | 707e018 | 2008-04-12 04:36:06 +0000 | [diff] [blame] | 572 | bool AlwaysInline, |
Dan Gohman | 1f13c68 | 2008-04-28 17:15:20 +0000 | [diff] [blame] | 573 | const Value *DstSV, uint64_t DstSVOff, |
| 574 | const Value *SrcSV, uint64_t SrcSVOff); |
Mon P Wang | 63307c3 | 2008-05-05 19:05:59 +0000 | [diff] [blame] | 575 | |
| 576 | /// Utility function to emit atomic bitwise operations (and, or, xor). |
| 577 | // It takes the bitwise instruction to expand, the associated machine basic |
| 578 | // block, and the associated X86 opcodes for reg/reg and reg/imm. |
| 579 | MachineBasicBlock *EmitAtomicBitwiseWithCustomInserter( |
| 580 | MachineInstr *BInstr, |
| 581 | MachineBasicBlock *BB, |
| 582 | unsigned regOpc, |
Andrew Lenharth | 507a58a | 2008-06-14 05:48:15 +0000 | [diff] [blame] | 583 | unsigned immOpc, |
Dale Johannesen | 140be2d | 2008-08-19 18:47:28 +0000 | [diff] [blame] | 584 | unsigned loadOpc, |
| 585 | unsigned cxchgOpc, |
| 586 | unsigned copyOpc, |
| 587 | unsigned notOpc, |
| 588 | unsigned EAXreg, |
| 589 | TargetRegisterClass *RC, |
Andrew Lenharth | 507a58a | 2008-06-14 05:48:15 +0000 | [diff] [blame] | 590 | bool invSrc = false); |
Mon P Wang | 63307c3 | 2008-05-05 19:05:59 +0000 | [diff] [blame] | 591 | |
| 592 | /// Utility function to emit atomic min and max. It takes the min/max |
| 593 | // instruction to expand, the associated basic block, and the associated |
| 594 | // cmov opcode for moving the min or max value. |
| 595 | MachineBasicBlock *EmitAtomicMinMaxWithCustomInserter(MachineInstr *BInstr, |
| 596 | MachineBasicBlock *BB, |
| 597 | unsigned cmovOpc); |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 598 | }; |
Evan Cheng | c3f44b0 | 2008-09-03 00:03:49 +0000 | [diff] [blame] | 599 | |
  namespace X86 {
    /// createFastISel - Free-function entry point for creating the X86
    /// FastISel object for the given MachineFunction.
    FastISel *createFastISel(MachineFunction &mf);
  }
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 603 | } |
| 604 | |
Chris Lattner | dbdbf0c | 2005-11-15 00:40:23 +0000 | [diff] [blame] | 605 | #endif // X86ISELLOWERING_H |